Linux ARM-MSM sub-architecture
 help / color / mirror / Atom feed
From: Luo Jie <quic_luoj@quicinc.com>
To: <agross@kernel.org>, <andersson@kernel.org>,
	<konrad.dybcio@linaro.org>, <davem@davemloft.net>,
	<edumazet@google.com>, <kuba@kernel.org>, <pabeni@redhat.com>,
	<robh+dt@kernel.org>, <krzysztof.kozlowski+dt@linaro.org>,
	<conor+dt@kernel.org>, <corbet@lwn.net>,
	<catalin.marinas@arm.com>, <will@kernel.org>,
	<p.zabel@pengutronix.de>, <linux@armlinux.org.uk>,
	<shannon.nelson@amd.com>, <anthony.l.nguyen@intel.com>,
	<jasowang@redhat.com>, <brett.creeley@amd.com>,
	<rrameshbabu@nvidia.com>, <joshua.a.hay@intel.com>,
	<arnd@arndb.de>, <geert+renesas@glider.be>,
	<neil.armstrong@linaro.org>, <dmitry.baryshkov@linaro.org>,
	<nfraprado@collabora.com>, <m.szyprowski@samsung.com>,
	<u-kumar1@ti.com>, <jacob.e.keller@intel.com>, <andrew@lunn.ch>
Cc: <netdev@vger.kernel.org>, <linux-arm-msm@vger.kernel.org>,
	<devicetree@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<linux-doc@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>, <ryazanov.s.a@gmail.com>,
	<ansuelsmth@gmail.com>, <quic_kkumarcs@quicinc.com>,
	<quic_suruchia@quicinc.com>, <quic_soni@quicinc.com>,
	<quic_pavir@quicinc.com>, <quic_souravp@quicinc.com>,
	<quic_linchen@quicinc.com>, <quic_leiwei@quicinc.com>
Subject: [PATCH net-next 08/20] net: ethernet: qualcomm: Add PPE scheduler config
Date: Wed, 10 Jan 2024 19:40:20 +0800	[thread overview]
Message-ID: <20240110114033.32575-9-quic_luoj@quicinc.com> (raw)
In-Reply-To: <20240110114033.32575-1-quic_luoj@quicinc.com>

The PPE scheduler is configured according to the device tree. This
configuration is read and used for initialization by the PPE driver,
and adjusted later by the EDMA driver.

The PPE scheduler config determines the priority with which packets
are scheduled. The PPE supports a two-level QoS hierarchy, Level 0 and
Level 1. The scheduler config helps with the construction of the PPE
QoS hierarchy for each physical port.

Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
---
 drivers/net/ethernet/qualcomm/ppe/Makefile   |   2 +-
 drivers/net/ethernet/qualcomm/ppe/ppe.c      | 194 ++++++++++++++++-
 drivers/net/ethernet/qualcomm/ppe/ppe_ops.c  | 206 +++++++++++++++++++
 drivers/net/ethernet/qualcomm/ppe/ppe_ops.h  |  45 ++++
 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h |  64 ++++++
 5 files changed, 508 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_ops.h

diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
index 795aff6501e4..c00265339aa7 100644
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -4,4 +4,4 @@
 #
 
 obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
-qcom-ppe-objs := ppe.o
+qcom-ppe-objs := ppe.o ppe_ops.o
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
index 8bf32a7265d2..75c24a87e2be 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -15,8 +15,13 @@
 #include <linux/soc/qcom/ppe.h>
 #include "ppe.h"
 #include "ppe_regs.h"
+#include "ppe_ops.h"
 
 #define PPE_SCHEDULER_PORT_NUM		8
+#define PPE_SCHEDULER_L0_NUM		300
+#define PPE_SCHEDULER_L1_NUM		64
+#define PPE_SP_PRIORITY_NUM		8
+
 static const char * const ppe_clock_name[PPE_CLK_MAX] = {
 	"cmn_ahb",
 	"cmn_sys",
@@ -794,17 +799,202 @@ static int of_parse_ppe_scheduler_resource(struct ppe_device *ppe_dev,
 	return 0;
 }
 
+static int of_parse_ppe_scheduler_group_config(struct ppe_device *ppe_dev,
+					       struct device_node *group_node,
+					       int port,
+					       const char *node_name,
+					       const char *loop_name)
+{
+	struct ppe_qos_scheduler_cfg qos_cfg;
+	const struct ppe_queue_ops *ppe_queue_ops;
+	const __be32 *paddr;
+	int ret, len, i, node_id, level, node_max;
+	u32 tmp_cfg[5], pri_loop, max_pri;
+
+	ppe_queue_ops = ppe_queue_config_ops_get();
+	if (!ppe_queue_ops->queue_scheduler_set)
+		return -EINVAL;
+
+	/* The value of the property node_name can be single value
+	 * or array value.
+	 *
+	 * If the array value is defined, the property loop_name should not
+	 * be specified.
+	 *
+	 * If the single value is defined, the queue ID will be added in the
+	 * loop value defined by the loop_name.
+	 */
+	paddr = of_get_property(group_node, node_name, &len);
+	if (!paddr)
+		return dev_err_probe(ppe_dev->dev, -ENODEV,
+				     "Fail to get queue %s of port %d\n",
+				     node_name, port);
+
+	len /= sizeof(u32);
+
+	/* There are two levels scheduler configs, the level 0 scheduler
+	 * config is configured on the queue, the level 1 scheduler is
+	 * configured on the flow that is from the output of level 0
+	 * scheduler.
+	 */
+	if (!strcmp(node_name, "qcom,flow")) {
+		level = 1;
+		node_max = PPE_SCHEDULER_L1_NUM;
+	} else {
+		level = 0;
+		node_max = PPE_SCHEDULER_L0_NUM;
+	}
+
+	if (of_property_read_u32_array(group_node, "qcom,scheduler-config",
+				       tmp_cfg, ARRAY_SIZE(tmp_cfg)))
+		return dev_err_probe(ppe_dev->dev, -ENODEV,
+				     "Fail to get qcom,scheduler-config of port %d\n",
+				     port);
+
+	if (of_property_read_u32(group_node, loop_name, &pri_loop)) {
+		for (i = 0; i < len; i++) {
+			node_id = be32_to_cpup(paddr + i);
+			if (node_id >= node_max)
+				return dev_err_probe(ppe_dev->dev, -EINVAL,
+						     "Invalid node ID %d of port %d\n",
+						     node_id, port);
+
+			memset(&qos_cfg, 0, sizeof(qos_cfg));
+
+			qos_cfg.sp_id = tmp_cfg[0];
+			qos_cfg.c_pri = tmp_cfg[1];
+			qos_cfg.c_drr_id = tmp_cfg[2];
+			qos_cfg.e_pri = tmp_cfg[3];
+			qos_cfg.e_drr_id = tmp_cfg[4];
+			qos_cfg.c_drr_wt = 1;
+			qos_cfg.e_drr_wt = 1;
+			ret = ppe_queue_ops->queue_scheduler_set(ppe_dev,
+								 node_id,
+								 level,
+								 port,
+								 qos_cfg);
+			if (ret)
+				return dev_err_probe(ppe_dev->dev, ret,
+						     "scheduler set fail on node ID %d\n",
+						     node_id);
+		}
+	} else {
+		/* Only one base node ID allowed to loop. */
+		if (len != 1)
+			return dev_err_probe(ppe_dev->dev, -EINVAL,
+					"Multiple node ID defined to loop for port %d\n",
+					port);
+
+		/* Property qcom,drr-max-priority is optional for loop,
+		 * if not defined, the default value PPE_SP_PRIORITY_NUM
+		 * is used.
+		 */
+		max_pri = PPE_SP_PRIORITY_NUM;
+		of_property_read_u32(group_node, "qcom,drr-max-priority", &max_pri);
+
+		node_id = be32_to_cpup(paddr);
+		if (node_id >= node_max)
+			return dev_err_probe(ppe_dev->dev, -EINVAL,
+					"Invalid node ID %d defined to loop for port %d\n",
+					node_id, port);
+
+		for (i = 0; i < pri_loop; i++) {
+			memset(&qos_cfg, 0, sizeof(qos_cfg));
+
+			qos_cfg.sp_id = tmp_cfg[0] + i / max_pri;
+			qos_cfg.c_pri = tmp_cfg[1] + i % max_pri;
+			qos_cfg.c_drr_id = tmp_cfg[2] + i;
+			qos_cfg.e_pri = tmp_cfg[3] + i % max_pri;
+			qos_cfg.e_drr_id = tmp_cfg[4] + i;
+			qos_cfg.c_drr_wt = 1;
+			qos_cfg.e_drr_wt = 1;
+			ret = ppe_queue_ops->queue_scheduler_set(ppe_dev,
+								 node_id + i,
+								 level,
+								 port,
+								 qos_cfg);
+			if (ret)
+				return dev_err_probe(ppe_dev->dev, ret,
+						     "scheduler set fail on node ID %d\n",
+						     node_id + i);
+		}
+	}
+
+	return 0;
+}
+
+static int of_parse_ppe_scheduler_config(struct ppe_device *ppe_dev,
+					 struct device_node *port_node)
+{
+	struct device_node *scheduler_node, *child;
+	int port, ret;
+
+	if (of_property_read_u32(port_node, "port-id", &port))
+		return dev_err_probe(ppe_dev->dev, -ENODEV,
+				     "Fail to get port-id of l0scheduler\n");
+
+	scheduler_node = of_get_child_by_name(port_node, "l0scheduler");
+	if (!scheduler_node)
+		return dev_err_probe(ppe_dev->dev, -ENODEV,
+				     "Fail to get l0scheduler config\n");
+
+	for_each_available_child_of_node(scheduler_node, child) {
+		ret = of_parse_ppe_scheduler_group_config(ppe_dev, child, port,
+							  "qcom,ucast-queue",
+							  "qcom,ucast-loop-priority");
+		if (ret)
+			return ret;
+
+		ret = of_parse_ppe_scheduler_group_config(ppe_dev, child, port,
+							  "qcom,mcast-queue",
+							  "qcom,mcast-loop-priority");
+		if (ret)
+			return ret;
+	}
+
+	scheduler_node = of_get_child_by_name(port_node, "l1scheduler");
+	if (!scheduler_node)
+		return dev_err_probe(ppe_dev->dev, -ENODEV,
+				     "Fail to get l1scheduler config\n");
+
+	for_each_available_child_of_node(scheduler_node, child) {
+		ret = of_parse_ppe_scheduler_group_config(ppe_dev, child, port,
+							  "qcom,flow",
+							  "qcom,flow-loop-priority");
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
 static int of_parse_ppe_scheduler(struct ppe_device *ppe_dev,
 				  struct device_node *ppe_node)
 {
-	struct device_node *scheduler_node;
+	struct device_node *scheduler_node, *port_node;
+	int ret;
 
 	scheduler_node = of_get_child_by_name(ppe_node, "port-scheduler-resource");
 	if (!scheduler_node)
 		return dev_err_probe(ppe_dev->dev, -ENODEV,
 				     "port-scheduler-resource is not defined\n");
 
-	return of_parse_ppe_scheduler_resource(ppe_dev, scheduler_node);
+	ret = of_parse_ppe_scheduler_resource(ppe_dev, scheduler_node);
+	if (ret)
+		return ret;
+
+	scheduler_node = of_get_child_by_name(ppe_node, "port-scheduler-config");
+	if (!scheduler_node)
+		return dev_err_probe(ppe_dev->dev, -ENODEV,
+				     "port-scheduler-config is not defined\n");
+
+	for_each_available_child_of_node(scheduler_node, port_node) {
+		ret = of_parse_ppe_scheduler_config(ppe_dev, port_node);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
 
 static int of_parse_ppe_config(struct ppe_device *ppe_dev,
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
new file mode 100644
index 000000000000..7853c2fdcc63
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* Low level PPE operations made available to higher level network drivers
+ * such as ethernet or QoS drivers.
+ */
+
+#include <linux/soc/qcom/ppe.h>
+#include "ppe_ops.h"
+#include "ppe_regs.h"
+#include "ppe.h"
+
+static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+					  int node_id, int port,
+					  struct ppe_qos_scheduler_cfg scheduler_cfg)
+{
+	u32 val, index;
+
+	if (node_id >= PPE_L0_FLOW_MAP_TBL_NUM)
+		return -EINVAL;
+
+	val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_SP_ID, scheduler_cfg.sp_id) |
+			 FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.c_pri) |
+			 FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.e_pri) |
+			 FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_DRR_WT, scheduler_cfg.c_drr_wt) |
+			 FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_DRR_WT, scheduler_cfg.e_drr_wt);
+	index = PPE_L0_FLOW_MAP_TBL + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	val = FIELD_PREP(PPE_L0_C_SP_CFG_TBL_DRR_ID, scheduler_cfg.c_drr_id) |
+			 FIELD_PREP(PPE_L0_C_SP_CFG_TBL_DRR_CREDIT_UNIT, scheduler_cfg.c_drr_unit);
+	index = PPE_L0_C_SP_CFG_TBL +
+		(scheduler_cfg.sp_id * 8 + scheduler_cfg.c_pri) * PPE_L0_C_SP_CFG_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	val = FIELD_PREP(PPE_L0_E_SP_CFG_TBL_DRR_ID, scheduler_cfg.e_drr_id) |
+			 FIELD_PREP(PPE_L0_E_SP_CFG_TBL_DRR_CREDIT_UNIT, scheduler_cfg.e_drr_unit);
+	index = PPE_L0_E_SP_CFG_TBL +
+		(scheduler_cfg.sp_id * 8 + scheduler_cfg.e_pri) * PPE_L0_E_SP_CFG_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+	index = PPE_L0_FLOW_PORT_MAP_TBL + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	index = PPE_L0_COMP_CFG_TBL + node_id * PPE_L0_COMP_CFG_TBL_INC;
+	return ppe_mask(ppe_dev, index, PPE_L0_COMP_CFG_TBL_DRR_METER_LEN,
+			FIELD_PREP(PPE_L0_COMP_CFG_TBL_DRR_METER_LEN,
+				   scheduler_cfg.drr_frame_mode));
+}
+
+static int ppe_scheduler_l0_queue_map_get(struct ppe_device *ppe_dev,
+					  int node_id, int *port,
+					  struct ppe_qos_scheduler_cfg *scheduler_cfg)
+{
+	u32 val, index;
+
+	if (node_id >= PPE_L0_FLOW_MAP_TBL_NUM)
+		return -EINVAL;
+
+	index = PPE_L0_FLOW_MAP_TBL + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->sp_id = FIELD_GET(PPE_L0_FLOW_MAP_TBL_SP_ID, val);
+	scheduler_cfg->c_pri = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_PRI, val);
+	scheduler_cfg->e_pri = FIELD_GET(PPE_L0_FLOW_MAP_TBL_E_PRI, val);
+	scheduler_cfg->c_drr_wt = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_DRR_WT, val);
+	scheduler_cfg->e_drr_wt = FIELD_GET(PPE_L0_FLOW_MAP_TBL_E_DRR_WT, val);
+
+	index = PPE_L0_C_SP_CFG_TBL +
+		(scheduler_cfg->sp_id * 8 + scheduler_cfg->c_pri) * PPE_L0_C_SP_CFG_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->c_drr_id = FIELD_GET(PPE_L0_C_SP_CFG_TBL_DRR_ID, val);
+	scheduler_cfg->c_drr_unit = FIELD_GET(PPE_L0_C_SP_CFG_TBL_DRR_CREDIT_UNIT, val);
+
+	index = PPE_L0_E_SP_CFG_TBL +
+		(scheduler_cfg->sp_id * 8 + scheduler_cfg->e_pri) * PPE_L0_E_SP_CFG_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->e_drr_id = FIELD_GET(PPE_L0_E_SP_CFG_TBL_DRR_ID, val);
+	scheduler_cfg->e_drr_unit = FIELD_GET(PPE_L0_E_SP_CFG_TBL_DRR_CREDIT_UNIT, val);
+
+	index = PPE_L0_FLOW_PORT_MAP_TBL + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	*port = FIELD_GET(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, val);
+
+	index = PPE_L0_COMP_CFG_TBL + node_id * PPE_L0_COMP_CFG_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->drr_frame_mode = FIELD_GET(PPE_L0_COMP_CFG_TBL_DRR_METER_LEN, val);
+
+	return 0;
+}
+
+static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
+					  int node_id, int port,
+					  struct ppe_qos_scheduler_cfg scheduler_cfg)
+{
+	u32 val, index;
+
+	if (node_id >= PPE_L1_FLOW_MAP_TBL_NUM)
+		return -EINVAL;
+
+	val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_SP_ID, scheduler_cfg.sp_id) |
+			 FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.c_pri) |
+			 FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.e_pri) |
+			 FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_DRR_WT, scheduler_cfg.c_drr_wt) |
+			 FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_DRR_WT, scheduler_cfg.e_drr_wt);
+	index = PPE_L1_FLOW_MAP_TBL + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	val = FIELD_PREP(PPE_L1_C_SP_CFG_TBL_DRR_ID, scheduler_cfg.c_drr_id) |
+			 FIELD_PREP(PPE_L1_C_SP_CFG_TBL_DRR_CREDIT_UNIT, scheduler_cfg.c_drr_unit);
+	index = PPE_L1_C_SP_CFG_TBL +
+		(scheduler_cfg.sp_id * 8 + scheduler_cfg.c_pri) * PPE_L1_C_SP_CFG_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	val = FIELD_PREP(PPE_L1_E_SP_CFG_TBL_DRR_ID, scheduler_cfg.e_drr_id) |
+		FIELD_PREP(PPE_L1_E_SP_CFG_TBL_DRR_CREDIT_UNIT, scheduler_cfg.e_drr_unit);
+	index = PPE_L1_E_SP_CFG_TBL +
+		(scheduler_cfg.sp_id * 8 + scheduler_cfg.e_pri) * PPE_L1_E_SP_CFG_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+	index = PPE_L1_FLOW_PORT_MAP_TBL + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+	ppe_write(ppe_dev, index, val);
+
+	index = PPE_L1_COMP_CFG_TBL + node_id * PPE_L1_COMP_CFG_TBL_INC;
+	return ppe_mask(ppe_dev, index, PPE_L1_COMP_CFG_TBL_DRR_METER_LEN,
+			FIELD_PREP(PPE_L1_COMP_CFG_TBL_DRR_METER_LEN,
+				   scheduler_cfg.drr_frame_mode));
+}
+
+static int ppe_scheduler_l1_queue_map_get(struct ppe_device *ppe_dev,
+					  int node_id, int *port,
+					  struct ppe_qos_scheduler_cfg *scheduler_cfg)
+{
+	u32 val, index;
+
+	if (node_id >= PPE_L1_FLOW_MAP_TBL_NUM)
+		return -EINVAL;
+
+	index = PPE_L1_FLOW_MAP_TBL + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->sp_id = FIELD_GET(PPE_L1_FLOW_MAP_TBL_SP_ID, val);
+	scheduler_cfg->c_pri = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_PRI, val);
+	scheduler_cfg->e_pri = FIELD_GET(PPE_L1_FLOW_MAP_TBL_E_PRI, val);
+	scheduler_cfg->c_drr_wt = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_DRR_WT, val);
+	scheduler_cfg->e_drr_wt = FIELD_GET(PPE_L1_FLOW_MAP_TBL_E_DRR_WT, val);
+
+	index = PPE_L1_C_SP_CFG_TBL +
+		(scheduler_cfg->sp_id * 8 + scheduler_cfg->c_pri) * PPE_L1_C_SP_CFG_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->c_drr_id = FIELD_GET(PPE_L1_C_SP_CFG_TBL_DRR_ID, val);
+	scheduler_cfg->c_drr_unit = FIELD_GET(PPE_L1_C_SP_CFG_TBL_DRR_CREDIT_UNIT, val);
+
+	index = PPE_L1_E_SP_CFG_TBL +
+		(scheduler_cfg->sp_id * 8 + scheduler_cfg->e_pri) * PPE_L1_E_SP_CFG_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->e_drr_id = FIELD_GET(PPE_L1_E_SP_CFG_TBL_DRR_ID, val);
+	scheduler_cfg->e_drr_unit = FIELD_GET(PPE_L1_E_SP_CFG_TBL_DRR_CREDIT_UNIT, val);
+
+	index = PPE_L1_FLOW_PORT_MAP_TBL + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	*port = FIELD_GET(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, val);
+
+	index = PPE_L1_COMP_CFG_TBL + node_id * PPE_L1_COMP_CFG_TBL_INC;
+	ppe_read(ppe_dev, index, &val);
+	scheduler_cfg->drr_frame_mode = FIELD_GET(PPE_L1_COMP_CFG_TBL_DRR_METER_LEN, val);
+
+	return 0;
+}
+
+static int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+				   int node_id, int level, int port,
+				   struct ppe_qos_scheduler_cfg scheduler_cfg)
+{
+	if (level == 0)
+		return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id, port, scheduler_cfg);
+	else if (level == 1)
+		return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id, port, scheduler_cfg);
+	else
+		return -EINVAL;
+}
+
+static int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
+				   int node_id, int level, int *port,
+				   struct ppe_qos_scheduler_cfg *scheduler_cfg)
+{
+	if (level == 0)
+		return ppe_scheduler_l0_queue_map_get(ppe_dev, node_id, port, scheduler_cfg);
+	else if (level == 1)
+		return ppe_scheduler_l1_queue_map_get(ppe_dev, node_id, port, scheduler_cfg);
+	else
+		return -EINVAL;
+}
+
+static const struct ppe_queue_ops qcom_ppe_queue_config_ops = {
+	.queue_scheduler_set = ppe_queue_scheduler_set,
+	.queue_scheduler_get = ppe_queue_scheduler_get,
+};
+
+const struct ppe_queue_ops *ppe_queue_config_ops_get(void)
+{
+	return &qcom_ppe_queue_config_ops;
+}
+EXPORT_SYMBOL_GPL(ppe_queue_config_ops_get);
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
new file mode 100644
index 000000000000..4980e3fed1c0
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_ops.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* Low level PPE operations to be used by higher level network drivers
+ * such as ethernet or QoS drivers.
+ */
+
+#ifndef __PPE_OPS_H__
+#define __PPE_OPS_H__
+
+/* PPE hardware QoS configurations used to dispatch the packet passed
+ * through PPE, the scheduler supports DRR(deficit round robin with the
+ * weight) and SP(strict priority).
+ */
+struct ppe_qos_scheduler_cfg {
+	int sp_id;
+	int e_pri;
+	int c_pri;
+	int c_drr_id;
+	int e_drr_id;
+	int e_drr_wt;
+	int c_drr_wt;
+	int c_drr_unit;
+	int e_drr_unit;
+	int drr_frame_mode;
+};
+
+/* The operations are used to configure the PPE queue related resource */
+struct ppe_queue_ops {
+	int (*queue_scheduler_set)(struct ppe_device *ppe_dev,
+				   int node_id,
+				   int level,
+				   int port,
+				   struct ppe_qos_scheduler_cfg scheduler_cfg);
+	int (*queue_scheduler_get)(struct ppe_device *ppe_dev,
+				   int node_id,
+				   int level,
+				   int *port,
+				   struct ppe_qos_scheduler_cfg *scheduler_cfg);
+};
+
+const struct ppe_queue_ops *ppe_queue_config_ops_get(void);
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
index 589f92a4f607..10daa70f28e9 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -31,11 +31,75 @@
 #define PPE_PSCH_TDM_DEPTH_CFG_INC				4
 #define PPE_PSCH_TDM_DEPTH_CFG_TDM_DEPTH			GENMASK(7, 0)
 
+#define PPE_L0_FLOW_MAP_TBL					0x402000
+#define PPE_L0_FLOW_MAP_TBL_NUM					300
+#define PPE_L0_FLOW_MAP_TBL_INC					0x10
+#define PPE_L0_FLOW_MAP_TBL_SP_ID				GENMASK(5, 0)
+#define PPE_L0_FLOW_MAP_TBL_C_PRI				GENMASK(8, 6)
+#define PPE_L0_FLOW_MAP_TBL_E_PRI				GENMASK(11, 9)
+#define PPE_L0_FLOW_MAP_TBL_C_DRR_WT				GENMASK(21, 12)
+#define PPE_L0_FLOW_MAP_TBL_E_DRR_WT				GENMASK(31, 22)
+
+#define PPE_L0_C_SP_CFG_TBL					0x404000
+#define PPE_L0_C_SP_CFG_TBL_NUM					512
+#define PPE_L0_C_SP_CFG_TBL_INC					0x10
+#define PPE_L0_C_SP_CFG_TBL_DRR_ID				GENMASK(7, 0)
+#define PPE_L0_C_SP_CFG_TBL_DRR_CREDIT_UNIT			BIT(8)
+
+#define PPE_L0_E_SP_CFG_TBL					0x406000
+#define PPE_L0_E_SP_CFG_TBL_NUM					512
+#define PPE_L0_E_SP_CFG_TBL_INC					0x10
+#define PPE_L0_E_SP_CFG_TBL_DRR_ID				GENMASK(7, 0)
+#define PPE_L0_E_SP_CFG_TBL_DRR_CREDIT_UNIT			BIT(8)
+
+#define PPE_L0_FLOW_PORT_MAP_TBL				0x408000
+#define PPE_L0_FLOW_PORT_MAP_TBL_NUM				300
+#define PPE_L0_FLOW_PORT_MAP_TBL_INC				0x10
+#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM			GENMASK(3, 0)
+
+#define PPE_L0_COMP_CFG_TBL					0x428000
+#define PPE_L0_COMP_CFG_TBL_NUM					300
+#define PPE_L0_COMP_CFG_TBL_INC					0x10
+#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN			GENMASK(1, 0)
+#define PPE_L0_COMP_CFG_TBL_DRR_METER_LEN			GENMASK(3, 2)
+
 #define PPE_DEQ_OPR_TBL						0x430000
 #define PPE_DEQ_OPR_TBL_NUM					300
 #define PPE_DEQ_OPR_TBL_INC					0x10
 #define PPE_ENQ_OPR_TBL_DEQ_DISABLE				BIT(0)
 
+#define PPE_L1_FLOW_MAP_TBL					0x440000
+#define PPE_L1_FLOW_MAP_TBL_NUM					64
+#define PPE_L1_FLOW_MAP_TBL_INC					0x10
+#define PPE_L1_FLOW_MAP_TBL_SP_ID				GENMASK(3, 0)
+#define PPE_L1_FLOW_MAP_TBL_C_PRI				GENMASK(6, 4)
+#define PPE_L1_FLOW_MAP_TBL_E_PRI				GENMASK(9, 7)
+#define PPE_L1_FLOW_MAP_TBL_C_DRR_WT				GENMASK(19, 10)
+#define PPE_L1_FLOW_MAP_TBL_E_DRR_WT				GENMASK(29, 20)
+
+#define PPE_L1_C_SP_CFG_TBL					0x442000
+#define PPE_L1_C_SP_CFG_TBL_NUM					64
+#define PPE_L1_C_SP_CFG_TBL_INC					0x10
+#define PPE_L1_C_SP_CFG_TBL_DRR_ID				GENMASK(5, 0)
+#define PPE_L1_C_SP_CFG_TBL_DRR_CREDIT_UNIT			BIT(6)
+
+#define PPE_L1_E_SP_CFG_TBL					0x444000
+#define PPE_L1_E_SP_CFG_TBL_NUM					64
+#define PPE_L1_E_SP_CFG_TBL_INC					0x10
+#define PPE_L1_E_SP_CFG_TBL_DRR_ID				GENMASK(5, 0)
+#define PPE_L1_E_SP_CFG_TBL_DRR_CREDIT_UNIT			BIT(6)
+
+#define PPE_L1_FLOW_PORT_MAP_TBL				0x446000
+#define PPE_L1_FLOW_PORT_MAP_TBL_NUM				64
+#define PPE_L1_FLOW_PORT_MAP_TBL_INC				0x10
+#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM			GENMASK(3, 0)
+
+#define PPE_L1_COMP_CFG_TBL					0x46a000
+#define PPE_L1_COMP_CFG_TBL_NUM					64
+#define PPE_L1_COMP_CFG_TBL_INC					0x10
+#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN			GENMASK(1, 0)
+#define PPE_L1_COMP_CFG_TBL_DRR_METER_LEN			GENMASK(3, 2)
+
 #define PPE_PSCH_TDM_CFG_TBL					0x47a000
 #define PPE_PSCH_TDM_CFG_TBL_NUM				128
 #define PPE_PSCH_TDM_CFG_TBL_INC				0x10
-- 
2.42.0


  parent reply	other threads:[~2024-01-10 11:43 UTC|newest]

Thread overview: 37+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-01-10 11:40 [PATCH net-next 00/20] net: ethernet: Add qcom PPE driver Luo Jie
2024-01-10 11:40 ` [PATCH net-next 01/20] Documentation: networking: qcom PPE driver documentation Luo Jie
2024-01-10 11:40 ` [PATCH net-next 02/20] dt-bindings: net: qcom,ppe: Add bindings yaml file Luo Jie
2024-01-10 12:22   ` Krzysztof Kozlowski
2024-01-22 13:55     ` Jie Luo
2024-01-22 14:25       ` Andrew Lunn
2024-01-10 13:01   ` Rob Herring
2024-01-10 11:40 ` [PATCH net-next 03/20] net: ethernet: qualcomm: Add qcom PPE driver Luo Jie
2024-01-10 11:40 ` [PATCH net-next 04/20] net: ethernet: qualcomm: Add PPE buffer manager configuration Luo Jie
2024-01-10 11:40 ` [PATCH net-next 05/20] net: ethernet: qualcomm: Add PPE queue management config Luo Jie
2024-01-10 11:40 ` [PATCH net-next 06/20] net: ethernet: qualcomm: Add PPE TDM config Luo Jie
2024-01-10 11:40 ` [PATCH net-next 07/20] net: ethernet: qualcomm: Add PPE port scheduler resource Luo Jie
2024-01-10 11:40 ` Luo Jie [this message]
2024-01-10 11:40 ` [PATCH net-next 09/20] net: ethernet: qualcomm: Add PPE queue config Luo Jie
2024-01-10 11:40 ` [PATCH net-next 10/20] net: ethernet: qualcomm: Add PPE service code config Luo Jie
2024-01-10 11:40 ` [PATCH net-next 11/20] net: ethernet: qualcomm: Add PPE port control config Luo Jie
2024-01-10 11:40 ` [PATCH net-next 12/20] net: ethernet: qualcomm: Add PPE RSS hash config Luo Jie
2024-01-10 11:40 ` [PATCH net-next 13/20] net: ethernet: qualcomm: Export PPE function set_maxframe Luo Jie
2024-01-10 11:40 ` [PATCH net-next 14/20] net: ethernet: qualcomm: Add PPE AC(admission control) function Luo Jie
2024-01-10 11:40 ` [PATCH net-next 15/20] net: ethernet: qualcomm: Add PPE debugfs counters Luo Jie
2024-01-10 11:40 ` [PATCH net-next 16/20] net: ethernet: qualcomm: Add PPE L2 bridge initialization Luo Jie
2024-01-10 11:40 ` [PATCH net-next 17/20] net: ethernet: qualcomm: Add PPE UNIPHY support for phylink Luo Jie
2024-01-10 12:09   ` Russell King (Oracle)
2024-01-22 14:37     ` Lei Wei
2024-01-10 11:40 ` [PATCH net-next 18/20] net: ethernet: qualcomm: Add PPE MAC " Luo Jie
2024-01-10 12:18   ` Russell King (Oracle)
2024-01-22 15:01     ` Lei Wei
2024-01-22 17:36       ` Russell King (Oracle)
2024-01-10 11:40 ` [PATCH net-next 19/20] net: ethernet: qualcomm: Add PPE MAC functions Luo Jie
2024-01-10 11:40 ` [PATCH net-next 20/20] arm64: defconfig: Enable qcom PPE driver Luo Jie
2024-01-10 12:24 ` [PATCH net-next 00/20] net: ethernet: Add " Krzysztof Kozlowski
2024-01-10 15:44   ` Simon Horman
2024-01-12 15:49     ` Jie Luo
2024-01-10 22:24 ` Jakub Kicinski
2024-01-11 15:49   ` Jie Luo
2024-01-12 17:56     ` Christian Marangi
2024-01-17 15:25       ` Jie Luo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240110114033.32575-9-quic_luoj@quicinc.com \
    --to=quic_luoj@quicinc.com \
    --cc=agross@kernel.org \
    --cc=andersson@kernel.org \
    --cc=andrew@lunn.ch \
    --cc=ansuelsmth@gmail.com \
    --cc=anthony.l.nguyen@intel.com \
    --cc=arnd@arndb.de \
    --cc=brett.creeley@amd.com \
    --cc=catalin.marinas@arm.com \
    --cc=conor+dt@kernel.org \
    --cc=corbet@lwn.net \
    --cc=davem@davemloft.net \
    --cc=devicetree@vger.kernel.org \
    --cc=dmitry.baryshkov@linaro.org \
    --cc=edumazet@google.com \
    --cc=geert+renesas@glider.be \
    --cc=jacob.e.keller@intel.com \
    --cc=jasowang@redhat.com \
    --cc=joshua.a.hay@intel.com \
    --cc=konrad.dybcio@linaro.org \
    --cc=krzysztof.kozlowski+dt@linaro.org \
    --cc=kuba@kernel.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-arm-msm@vger.kernel.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux@armlinux.org.uk \
    --cc=m.szyprowski@samsung.com \
    --cc=neil.armstrong@linaro.org \
    --cc=netdev@vger.kernel.org \
    --cc=nfraprado@collabora.com \
    --cc=p.zabel@pengutronix.de \
    --cc=pabeni@redhat.com \
    --cc=quic_kkumarcs@quicinc.com \
    --cc=quic_leiwei@quicinc.com \
    --cc=quic_linchen@quicinc.com \
    --cc=quic_pavir@quicinc.com \
    --cc=quic_soni@quicinc.com \
    --cc=quic_souravp@quicinc.com \
    --cc=quic_suruchia@quicinc.com \
    --cc=robh+dt@kernel.org \
    --cc=rrameshbabu@nvidia.com \
    --cc=ryazanov.s.a@gmail.com \
    --cc=shannon.nelson@amd.com \
    --cc=u-kumar1@ti.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox