From: Nitin Rawat <nitin.rawat@oss.qualcomm.com>
To: vkoul@kernel.org, kishon@kernel.org, mani@kernel.org,
neil.armstrong@linaro.org, dmitry.baryshkov@oss.qualcomm.com,
andersson@kernel.org, konradybcio@kernel.org
Cc: linux-arm-msm@vger.kernel.org, linux-phy@lists.infradead.org,
linux-kernel@vger.kernel.org,
Nitin Rawat <quic_nitirawa@quicinc.com>
Subject: [PATCH V4 2/2] phy: qcom-qmp-ufs: Add PHY and PLL regulator load
Date: Sat, 30 Aug 2025 12:33:53 +0530
Message-ID: <20250830070353.2694-3-nitin.rawat@oss.qualcomm.com>
In-Reply-To: <20250830070353.2694-1-nitin.rawat@oss.qualcomm.com>
From: Nitin Rawat <quic_nitirawa@quicinc.com>
Add PHY and PLL regulator load voting support for all supported
platforms by introducing dedicated regulator bulk data arrays that
carry per-SoC load values.

Setting the appropriate regulator load current during PHY operations
ensures stable operation and proper power management on platforms
where the regulators are shared between the QMP UFS PHY and other
IP blocks.
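
As a rough consumer-side sketch of how these arrays take effect
(assuming the regulator core's handling of the init_load_uA field,
which votes the declared load via regulator_set_load() when the
supplies are acquired; the names and load values below are
illustrative, not taken from this driver):

	/* Illustrative sketch, not part of this patch. */
	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static struct regulator_bulk_data example_ufsphy_vreg_l[] = {
		{ .supply = "vdda-phy", .init_load_uA = 90000 },
		{ .supply = "vdda-pll", .init_load_uA = 18000 },
	};

	static int example_phy_get_vregs(struct device *dev)
	{
		/*
		 * devm_regulator_bulk_get() looks up each supply and,
		 * because init_load_uA is non-zero, the core applies
		 * that load with regulator_set_load() before returning,
		 * so the driver never needs to set the load explicitly.
		 */
		return devm_regulator_bulk_get(dev,
					       ARRAY_SIZE(example_ufsphy_vreg_l),
					       example_ufsphy_vreg_l);
	}
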
Signed-off-by: Nitin Rawat <quic_nitirawa@quicinc.com>
---
drivers/phy/qualcomm/phy-qcom-qmp-ufs.c | 138 ++++++++++++++++++------
1 file changed, 104 insertions(+), 34 deletions(-)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index aaa88ca0ef07..8a280433a42b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1164,10 +1164,80 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
readl(base + offset);
}
-/* Default regulator bulk data (no load used) */
-static const struct regulator_bulk_data qmp_phy_vreg_l[] = {
- { .supply = "vdda-phy" },
- { .supply = "vdda-pll" },
+/* Regulator bulk data with load values for specific configurations */
+static const struct regulator_bulk_data msm8996_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14600 },
+};
+
+static const struct regulator_bulk_data sa8775p_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 137000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sc7280_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 97500 },
+ { .supply = "vdda-pll", .init_load_uA = 18400 },
+};
+
+static const struct regulator_bulk_data sc8280xp_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 85700 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sdm845_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14600 },
+};
+
+static const struct regulator_bulk_data sm6115_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14200 },
+};
+
+static const struct regulator_bulk_data sm7150_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 62900 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sm8150_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 90200 },
+ { .supply = "vdda-pll", .init_load_uA = 19000 },
+};
+
+static const struct regulator_bulk_data sm8250_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 89900 },
+ { .supply = "vdda-pll", .init_load_uA = 18800 },
+};
+
+static const struct regulator_bulk_data sm8350_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 91600 },
+ { .supply = "vdda-pll", .init_load_uA = 19000 },
+};
+
+static const struct regulator_bulk_data sm8450_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 173000 },
+ { .supply = "vdda-pll", .init_load_uA = 24900 },
+};
+
+static const struct regulator_bulk_data sm8475_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 213030 },
+ { .supply = "vdda-pll", .init_load_uA = 18340 },
+};
+
+static const struct regulator_bulk_data sm8550_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 188000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sm8650_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 205000 },
+ { .supply = "vdda-pll", .init_load_uA = 17500 },
+};
+
+static const struct regulator_bulk_data sm8750_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 213000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
};
static const struct qmp_ufs_offsets qmp_ufs_offsets = {
@@ -1203,8 +1273,8 @@ static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.rx_num = ARRAY_SIZE(msm8996_ufsphy_rx),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = msm8996_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_ufsphy_vreg_l),
.regs = ufsphy_v2_regs_layout,
@@ -1240,8 +1310,8 @@ static const struct qmp_phy_cfg sa8775p_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sa8775p_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sa8775p_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1274,8 +1344,8 @@ static const struct qmp_phy_cfg sc7280_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sc7280_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sc7280_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1308,8 +1378,8 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sc8280xp_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sc8280xp_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1333,8 +1403,8 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sdm845_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sdm845_ufsphy_vreg_l),
.regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
@@ -1360,8 +1430,8 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
.serdes = sm6115_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm6115_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm6115_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm6115_ufsphy_vreg_l),
.regs = ufsphy_v2_regs_layout,
.no_pcs_sw_reset = true,
@@ -1387,8 +1457,8 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm7150_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm7150_ufsphy_vreg_l),
.regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
@@ -1423,8 +1493,8 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8150_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8150_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1457,8 +1527,8 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8250_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8250_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1491,8 +1561,8 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8350_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8350_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1525,8 +1595,8 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8450_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8450_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1561,8 +1631,8 @@ static const struct qmp_phy_cfg sm8475_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8475_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8475_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8475_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1606,8 +1676,8 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8550_ufsphy_g5_pcs),
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8550_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1638,8 +1708,8 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8650_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8650_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1676,8 +1746,8 @@ static const struct qmp_phy_cfg sm8750_ufsphy_cfg = {
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8750_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8750_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
--
2.50.1