From: Shai Malin <smalin@marvell.com>
To: <netdev@vger.kernel.org>, <linux-nvme@lists.infradead.org>,
<davem@davemloft.net>, <kuba@kernel.org>, <sagi@grimberg.me>,
<hch@lst.de>, <axboe@fb.com>, <kbusch@kernel.org>
Cc: <aelior@marvell.com>, <mkalderon@marvell.com>,
<okulkarni@marvell.com>, <pkushwaha@marvell.com>,
<malin1024@gmail.com>, <smalin@marvell.com>
Subject: [RFC PATCH v5 21/27] qedn: Add support of configuring HW filter block
Date: Wed, 19 May 2021 14:13:34 +0300 [thread overview]
Message-ID: <20210519111340.20613-22-smalin@marvell.com> (raw)
In-Reply-To: <20210519111340.20613-1-smalin@marvell.com>
From: Prabhakar Kushwaha <pkushwaha@marvell.com>
HW filter can be configured to filter TCP packets based on either
the source or the target TCP port. QEDN leverages this feature to route
NVMeTCP traffic.
This patch configures the HW filter block based on the source port of all
received packets, so that they are delivered to the correct QEDN PF.
Acked-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/nvme/hw/qedn/qedn.h | 15 ++++
drivers/nvme/hw/qedn/qedn_main.c | 114 ++++++++++++++++++++++++++++++-
2 files changed, 128 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/hw/qedn/qedn.h b/drivers/nvme/hw/qedn/qedn.h
index ac1880d1bde9..82261fa95f5d 100644
--- a/drivers/nvme/hw/qedn/qedn.h
+++ b/drivers/nvme/hw/qedn/qedn.h
@@ -38,6 +38,11 @@
#define QEDN_IRQ_NAME_LEN 24
#define QEDN_IRQ_NO_FLAGS 0
+/* HW defines */
+
+/* QEDN_MAX_LLH_PORTS will be extended in future */
+#define QEDN_MAX_LLH_PORTS 16
+
/* Destroy connection defines */
#define QEDN_NON_ABORTIVE_TERMINATION 0
#define QEDN_ABORTIVE_TERMINATION 1
@@ -77,6 +82,7 @@
enum qedn_state {
QEDN_STATE_CORE_PROBED = 0,
QEDN_STATE_CORE_OPEN,
+ QEDN_STATE_LLH_PORT_FILTER_SET,
QEDN_STATE_MFW_STATE,
QEDN_STATE_NVMETCP_OPEN,
QEDN_STATE_IRQ_SET,
@@ -108,6 +114,8 @@ struct qedn_ctx {
/* Accessed with atomic bit ops, used with enum qedn_state */
unsigned long state;
+ u8 num_llh_filters;
+ struct list_head llh_filter_list;
u8 local_mac_addr[ETH_ALEN];
u16 mtu;
@@ -174,6 +182,12 @@ enum qedn_conn_state {
CONN_STATE_DESTROY_COMPLETE
};
+struct qedn_llh_filter {
+ struct list_head entry;
+ u16 port;
+ u16 ref_cnt;
+};
+
struct qedn_ctrl {
struct list_head glb_entry;
struct list_head pf_entry;
@@ -253,5 +267,6 @@ int qedn_initialize_endpoint(struct qedn_endpoint *ep, u8 *local_mac_addr,
int qedn_wait_for_conn_est(struct qedn_conn_ctx *conn_ctx);
int qedn_set_con_state(struct qedn_conn_ctx *conn_ctx, enum qedn_conn_state new_state);
void qedn_terminate_connection(struct qedn_conn_ctx *conn_ctx);
+__be16 qedn_get_in_port(struct sockaddr_storage *sa);
#endif /* _QEDN_H_ */
diff --git a/drivers/nvme/hw/qedn/qedn_main.c b/drivers/nvme/hw/qedn/qedn_main.c
index f132c049601f..85126f488f9b 100644
--- a/drivers/nvme/hw/qedn/qedn_main.c
+++ b/drivers/nvme/hw/qedn/qedn_main.c
@@ -22,6 +22,81 @@ static struct pci_device_id qedn_pci_tbl[] = {
{0, 0},
};
+__be16 qedn_get_in_port(struct sockaddr_storage *sa)
+{
+ return sa->ss_family == AF_INET
+ ? ((struct sockaddr_in *)sa)->sin_port
+ : ((struct sockaddr_in6 *)sa)->sin6_port;
+}
+
+struct qedn_llh_filter *qedn_add_llh_filter(struct qedn_ctx *qedn, u16 tcp_port)
+{
+ struct qedn_llh_filter *llh_filter = NULL;
+ struct qedn_llh_filter *llh_tmp = NULL;
+ bool new_filter = 1;
+ int rc = 0;
+
+ /* Check if LLH filter already defined */
+ list_for_each_entry_safe(llh_filter, llh_tmp, &qedn->llh_filter_list, entry) {
+ if (llh_filter->port == tcp_port) {
+ new_filter = 0;
+ llh_filter->ref_cnt++;
+ break;
+ }
+ }
+
+ if (new_filter) {
+ if (qedn->num_llh_filters >= QEDN_MAX_LLH_PORTS) {
+ pr_err("PF reached the max target ports limit %u. %u\n",
+ qedn->dev_info.common.abs_pf_id,
+ qedn->num_llh_filters);
+
+ return NULL;
+ }
+
+ rc = qed_ops->add_src_tcp_port_filter(qedn->cdev, tcp_port);
+ if (rc) {
+ pr_err("LLH port configuration failed. port:%u; rc:%u\n", tcp_port, rc);
+
+ return NULL;
+ }
+
+ llh_filter = kzalloc(sizeof(*llh_filter), GFP_KERNEL);
+ if (!llh_filter) {
+ qed_ops->remove_src_tcp_port_filter(qedn->cdev, tcp_port);
+
+ return NULL;
+ }
+
+ llh_filter->port = tcp_port;
+ llh_filter->ref_cnt = 1;
+ ++qedn->num_llh_filters;
+ list_add_tail(&llh_filter->entry, &qedn->llh_filter_list);
+ set_bit(QEDN_STATE_LLH_PORT_FILTER_SET, &qedn->state);
+ }
+
+ return llh_filter;
+}
+
+void qedn_dec_llh_filter(struct qedn_ctx *qedn, struct qedn_llh_filter *llh_filter)
+{
+ if (!llh_filter)
+ return;
+
+ llh_filter->ref_cnt--;
+ if (!llh_filter->ref_cnt) {
+ list_del(&llh_filter->entry);
+
+ /* Remove LLH protocol port filter */
+ qed_ops->remove_src_tcp_port_filter(qedn->cdev, llh_filter->port);
+
+ --qedn->num_llh_filters;
+ kfree(llh_filter);
+ if (!qedn->num_llh_filters)
+ clear_bit(QEDN_STATE_LLH_PORT_FILTER_SET, &qedn->state);
+ }
+}
+
static bool qedn_matches_qede(struct qedn_ctx *qedn, struct pci_dev *qede_pdev)
{
struct pci_dev *qedn_pdev = qedn->pdev;
@@ -88,8 +163,10 @@ qedn_claim_dev(struct nvme_tcp_ofld_dev *dev,
static int qedn_setup_ctrl(struct nvme_tcp_ofld_ctrl *ctrl, bool new)
{
struct nvme_tcp_ofld_dev *dev = ctrl->dev;
+ struct qedn_llh_filter *llh_filter = NULL;
struct qedn_ctrl *qctrl = NULL;
struct qedn_ctx *qedn = NULL;
+ __be16 remote_port;
int rc = 0;
if (new) {
@@ -116,15 +193,42 @@ static int qedn_setup_ctrl(struct nvme_tcp_ofld_ctrl *ctrl, bool new)
qedn = (struct qedn_ctx *)dev->private_data;
qctrl->qedn = qedn;
- /* Placeholder - setup LLH filter */
+ if (qedn->num_llh_filters == 0) {
+ qedn->mtu = dev->ndev->mtu;
+ memcpy(qedn->local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+ }
+
+ remote_port = qedn_get_in_port(&ctrl->conn_params.remote_ip_addr);
+ if (new) {
+ llh_filter = qedn_add_llh_filter(qedn, ntohs(remote_port));
+ if (!llh_filter) {
+ rc = -EFAULT;
+ goto err_out;
+ }
+
+ qctrl->llh_filter = llh_filter;
+ set_bit(LLH_FILTER, &qctrl->agg_state);
+ }
return 0;
+
+err_out:
+ flush_workqueue(qctrl->sp_wq);
+ kfree(qctrl);
+
+ return rc;
}
static int qedn_release_ctrl(struct nvme_tcp_ofld_ctrl *ctrl)
{
struct qedn_ctrl *qctrl = (struct qedn_ctrl *)ctrl->private_data;
+ if (test_and_clear_bit(LLH_FILTER, &qctrl->agg_state) &&
+ qctrl->llh_filter) {
+ qedn_dec_llh_filter(qctrl->qedn, qctrl->llh_filter);
+ qctrl->llh_filter = NULL;
+ }
+
if (test_and_clear_bit(QEDN_STATE_SP_WORK_THREAD_SET, &qctrl->agg_state))
flush_workqueue(qctrl->sp_wq);
@@ -405,6 +509,8 @@ static int qedn_setup_irq(struct qedn_ctx *qedn)
static inline void qedn_init_pf_struct(struct qedn_ctx *qedn)
{
+ INIT_LIST_HEAD(&qedn->llh_filter_list);
+ qedn->num_llh_filters = 0;
hash_init(qedn->conn_ctx_hash);
}
@@ -650,6 +756,12 @@ static void __qedn_remove(struct pci_dev *pdev)
return;
}
+ if (test_and_clear_bit(QEDN_STATE_LLH_PORT_FILTER_SET, &qedn->state)) {
+ pr_err("LLH port configuration removal. %d filters still set\n",
+ qedn->num_llh_filters);
+ qed_ops->clear_all_filters(qedn->cdev);
+ }
+
if (test_and_clear_bit(QEDN_STATE_REGISTERED_OFFLOAD_DEV, &qedn->state))
nvme_tcp_ofld_unregister_dev(&qedn->qedn_ofld_dev);
--
2.22.0
_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme
next prev parent reply other threads:[~2021-05-19 12:37 UTC|newest]
Thread overview: 53+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-05-19 11:13 [RFC PATCH v5 00/27] NVMeTCP Offload ULP and QEDN Device Driver Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 01/27] nvme-tcp-offload: Add nvme-tcp-offload - NVMeTCP HW offload ULP Shai Malin
2021-05-21 17:06 ` Himanshu Madhani
2021-05-24 20:11 ` Shai Malin
2021-05-21 22:13 ` Sagi Grimberg
2021-05-24 20:08 ` Shai Malin
2021-06-08 9:28 ` Petr Mladek
2021-05-19 11:13 ` [RFC PATCH v5 02/27] nvme-fabrics: Move NVMF_ALLOWED_OPTS and NVMF_REQUIRED_OPTS definitions Shai Malin
2021-05-21 17:08 ` Himanshu Madhani
2021-05-21 22:15 ` Sagi Grimberg
2021-05-19 11:13 ` [RFC PATCH v5 03/27] nvme-tcp-offload: Add device scan implementation Shai Malin
2021-05-21 17:22 ` Himanshu Madhani
2021-05-21 22:22 ` Sagi Grimberg
2021-05-24 20:14 ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 04/27] nvme-tcp-offload: Add controller level implementation Shai Malin
2021-05-21 17:19 ` Himanshu Madhani
2021-05-21 22:31 ` Sagi Grimberg
2021-05-27 20:03 ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 05/27] nvme-tcp-offload: Add controller level error recovery implementation Shai Malin
2021-05-21 17:42 ` Himanshu Madhani
2021-05-21 22:34 ` Sagi Grimberg
2021-05-19 11:13 ` [RFC PATCH v5 06/27] nvme-tcp-offload: Add queue level implementation Shai Malin
2021-05-21 18:18 ` Himanshu Madhani
2021-05-21 22:48 ` Sagi Grimberg
2021-05-24 20:16 ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 07/27] nvme-tcp-offload: Add IO " Shai Malin
2021-05-21 18:26 ` Himanshu Madhani
2021-05-19 11:13 ` [RFC PATCH v5 08/27] nvme-tcp-offload: Add Timeout and ASYNC Support Shai Malin
2021-05-21 18:36 ` Himanshu Madhani
2021-05-21 22:51 ` Sagi Grimberg
2021-05-24 20:17 ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 09/27] qed: Add TCP_ULP FW resource layout Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 10/27] qed: Add NVMeTCP Offload PF Level FW and HW HSI Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 11/27] qed: Add NVMeTCP Offload Connection " Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 12/27] qed: Add support of HW filter block Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 13/27] qed: Add NVMeTCP Offload IO Level FW and HW HSI Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 14/27] qed: Add NVMeTCP Offload IO Level FW Initializations Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 15/27] qed: Add IP services APIs support Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 16/27] qedn: Add qedn - Marvell's NVMeTCP HW offload vendor driver Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 17/27] qedn: Add qedn probe Shai Malin
2021-05-19 12:31 ` Leon Romanovsky
2021-05-19 14:29 ` Shai Malin
2021-05-19 15:31 ` Leon Romanovsky
2021-05-19 11:13 ` [RFC PATCH v5 18/27] qedn: Add qedn_claim_dev API support Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 19/27] qedn: Add IRQ and fast-path resources initializations Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 20/27] qedn: Add connection-level slowpath functionality Shai Malin
2021-05-19 11:13 ` Shai Malin [this message]
2021-05-19 11:13 ` [RFC PATCH v5 22/27] qedn: Add IO level qedn_send_req and fw_cq workqueue Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 23/27] qedn: Add support of Task and SGL Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 24/27] qedn: Add support of NVME ICReq & ICResp Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 25/27] qedn: Add IO level fastpath functionality Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 26/27] qedn: Add Connection and IO level recovery flows Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 27/27] qedn: Add support of ASYNC Shai Malin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210519111340.20613-22-smalin@marvell.com \
--to=smalin@marvell.com \
--cc=aelior@marvell.com \
--cc=axboe@fb.com \
--cc=davem@davemloft.net \
--cc=hch@lst.de \
--cc=kbusch@kernel.org \
--cc=kuba@kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=malin1024@gmail.com \
--cc=mkalderon@marvell.com \
--cc=netdev@vger.kernel.org \
--cc=okulkarni@marvell.com \
--cc=pkushwaha@marvell.com \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox