From: jsmart2021@gmail.com
To: linux-scsi@vger.kernel.org, linux-nvme@lists.infradead.org,
sagi@grimberg.me, martin.petersen@oracle.com
Cc: James Smart <jsmart2021@gmail.com>,
Dick Kennedy <dick.kennedy@broadcom.com>,
James Smart <james.smart@broadcom.com>
Subject: [PATCH-v1 17/22] Fix max_sgl_segments settings for NVME / NVMET
Date: Wed, 19 Apr 2017 21:46:36 -0700 [thread overview]
Message-ID: <20170420044641.10657-18-jsmart2021@gmail.com> (raw)
In-Reply-To: <20170420044641.10657-1-jsmart2021@gmail.com>
From: James Smart <jsmart2021@gmail.com>
NVME segment counts cannot currently be set to a large number, because
the existing module parameter lpfc_sg_seg_cnt is shared between both
SCSI and NVME.
Limit the module parameter lpfc_sg_seg_cnt to 128, with a default of
64, for both NVME and NVMET, assuming NVME is enabled on the port in the
driver. The driver will set max_sgl_segments in the NVME/NVMET template
to lpfc_sg_seg_cnt + 1.
Looks good,
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
---
drivers/scsi/lpfc/lpfc.h | 3 ++-
drivers/scsi/lpfc/lpfc_nvme.c | 18 ++++++++++++++----
drivers/scsi/lpfc/lpfc_nvmet.c | 19 +++++++++++++++----
3 files changed, 31 insertions(+), 9 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 257bbdd..0fc1450 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -56,7 +56,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MIN_NVME_SEG_CNT 254
+#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -781,6 +781,7 @@ struct lpfc_hba {
uint32_t cfg_nvmet_fb_size;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
+ uint32_t cfg_nvme_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index efe1242..1a83ade 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1113,12 +1113,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
- phba->cfg_sg_seg_cnt,
+ phba->cfg_nvme_seg_cnt,
lpfc_ncmd->seg_cnt);
lpfc_ncmd->seg_cnt = 0;
return 1;
@@ -2157,8 +2157,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
- /* For now need + 1 to get around NVME transport logic */
- lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
+ /* Limit to LPFC_MAX_NVME_SEG_CNT.
+ * For now need + 1 to get around NVME transport logic.
+ */
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing sg segment cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else {
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+ lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
/* localport is allocated from the stack, but the registration
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 8348bb5..d0dda42 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -704,8 +704,19 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
pinfo.port_id = vport->fc_myDID;
+ /* Limit to LPFC_MAX_NVME_SEG_CNT.
+ * For now need + 1 to get around NVME transport logic.
+ */
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6400 Reducing sg segment cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else {
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+ lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
- lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
NVMET_FCTGTFEAT_CMD_IN_ISR |
@@ -1278,11 +1289,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
+ if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "NPORT x%x oxid:x%x cnt %d\n",
+ ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
return NULL;
}
--
2.1.0
next prev parent reply other threads:[~2017-04-20 4:47 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-04-20 4:46 [PATCH-v1 00/22] lpfc updates for 11.2.0.12 jsmart2021
2017-04-20 4:46 ` [PATCH-v1 01/22] Standardize nvme SGL segment count jsmart2021
2017-04-20 7:15 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 02/22] Fix nvme unregister port timeout jsmart2021
2017-04-20 7:15 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 03/22] Fix rejected nvme LS Req jsmart2021
2017-04-20 7:29 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 04/22] Fix log message in completion path jsmart2021
2017-04-20 7:32 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 05/22] Add debug messages for nvme/fcp resource allocation jsmart2021
2017-04-20 7:33 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 06/22] Fix spelling in comments jsmart2021
2017-04-20 7:34 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 07/22] Remove unused defines for NVME PostBuf jsmart2021
2017-04-20 7:34 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 08/22] Remove NULL ptr check before kfree jsmart2021
2017-04-20 7:34 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 09/22] Fix extra line print in rqpair debug print jsmart2021
2017-04-20 7:35 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 10/22] Fix PRLI ACC rsp for NVME jsmart2021
2017-04-20 7:35 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 11/22] Fix driver unload/reload operation jsmart2021
2017-04-20 7:37 ` Johannes Thumshirn
2017-04-20 9:08 ` Junichi Nomura
2017-04-20 4:46 ` [PATCH-v1 12/22] Fix driver usage of 128B WQEs when WQ_CREATE is V1 jsmart2021
2017-04-20 7:37 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 13/22] Fix nvme initiator handling when not enabled jsmart2021
2017-04-20 7:38 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 14/22] Remove hba lock from NVMET issue WQE jsmart2021
2017-04-20 7:40 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 15/22] Fix driver load issues when MRQ=8 jsmart2021
2017-04-20 7:42 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 16/22] Fix crash after issuing lip reset jsmart2021
2017-04-20 7:42 ` Johannes Thumshirn
2017-04-20 4:46 ` jsmart2021 [this message]
2017-04-20 4:46 ` [PATCH-v1 18/22] Add Fabric assigned WWN support jsmart2021
2017-04-20 7:49 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 19/22] Fix implicit logo and RSCN handling for NVMET jsmart2021
2017-04-20 7:53 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 20/22] Update ABORT processing " jsmart2021
2017-04-20 8:10 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 21/22] Fix Express lane queue creation jsmart2021
2017-04-20 8:11 ` Johannes Thumshirn
2017-04-20 4:46 ` [PATCH-v1 22/22] lpfc revison 11.2.0.12 jsmart2021
2017-04-20 8:11 ` Johannes Thumshirn
2017-04-20 18:30 ` [PATCH-v1 00/22] lpfc updates for 11.2.0.12 Jens Axboe
2017-04-20 18:54 ` James Smart
2017-04-21 7:00 ` Johannes Thumshirn
2017-04-21 11:24 ` Martin K. Petersen
2017-04-21 16:41 ` James Smart
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170420044641.10657-18-jsmart2021@gmail.com \
--to=jsmart2021@gmail.com \
--cc=dick.kennedy@broadcom.com \
--cc=james.smart@broadcom.com \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-scsi@vger.kernel.org \
--cc=martin.petersen@oracle.com \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox