netdev.vger.kernel.org archive mirror
From: Leon Romanovsky <leon@kernel.org>
To: Caleb Sander <csander@purestorage.com>
Cc: Ariel Elior <aelior@marvell.com>,
	Manish Chopra <manishc@marvell.com>,
	Paolo Abeni <pabeni@redhat.com>,
	netdev@vger.kernel.org, Joern Engel <joern@purestorage.com>,
	Alok Prasad <palok@marvell.com>
Subject: Re: [PATCH net v2] qed: allow sleep in qed_mcp_trace_dump()
Date: Thu, 29 Dec 2022 08:52:23 +0200	[thread overview]
Message-ID: <Y605JxeB0c21zeI8@unreal> (raw)
In-Reply-To: <20221228220045.101647-1-csander@purestorage.com>

On Wed, Dec 28, 2022 at 03:00:46PM -0700, Caleb Sander wrote:
> By default, qed_mcp_cmd_and_union() delays 10us at a time in a loop
> that can run 500K times, so calls to qed_mcp_nvm_rd_cmd()
> may block the current thread for over 5s.
> We observed thread scheduling delays of over 700ms in production,
> with stack traces pointing to this code as the culprit.
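
As an illustration of the loop described above, here is a minimal sketch of a
polling loop whose wait primitive is selected by a can-sleep flag. The names,
constants, and structure are assumptions based on this commit message, not the
actual qed_mcp_cmd_and_union() code (the real driver may use a different
sleeping primitive and interval):

#include <linux/delay.h>	/* udelay(), usleep_range() */
#include <linux/errno.h>	/* ETIMEDOUT */
#include <linux/types.h>	/* bool, u32 */

#define EXAMPLE_POLL_MAX_ITER	500000	/* up to 500K iterations */
#define EXAMPLE_POLL_DELAY_US	10	/* 10us each: 500000 * 10us = 5s worst case */

/* Hypothetical helper: with b_can_sleep == false the CPU busy-waits for the
 * whole timeout; with b_can_sleep == true the thread sleeps between polls,
 * so the scheduler can run other work while waiting for the management FW.
 */
static int example_mcp_poll(bool (*done)(void *ctx), void *ctx, bool b_can_sleep)
{
	u32 i;

	for (i = 0; i < EXAMPLE_POLL_MAX_ITER; i++) {
		if (done(ctx))
			return 0;
		if (b_can_sleep)
			usleep_range(EXAMPLE_POLL_DELAY_US,
				     EXAMPLE_POLL_DELAY_US * 2);
		else
			udelay(EXAMPLE_POLL_DELAY_US);
	}
	return -ETIMEDOUT;
}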

Please add the stack trace to the commit message.

> 
> qed_mcp_trace_dump() is called from ethtool, so sleeping is permitted.
> It already can sleep in qed_mcp_halt(), which calls qed_mcp_cmd().
> Add a "can sleep" parameter to qed_find_nvram_image() and
> qed_nvram_read() so they can sleep during qed_mcp_trace_dump().
> qed_mcp_trace_get_meta_info() and qed_mcp_trace_read_meta(), which are
> called only by qed_mcp_trace_dump(), allow these functions to sleep.
> I can't tell if the other caller (qed_grc_dump_mcp_hw_dump()) can sleep,
> so keep b_can_sleep set to false when it calls these functions.
> 
> Signed-off-by: Caleb Sander <csander@purestorage.com>
> Acked-by: Alok Prasad <palok@marvell.com>
> Fixes: c965db4446291 ("qed: Add support for debug data collection")

The Fixes: line should come before the *-by tags.
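
For example, the tag block of the resent patch could be reordered like this
(reusing the tags already present above):

Fixes: c965db4446291 ("qed: Add support for debug data collection")
Signed-off-by: Caleb Sander <csander@purestorage.com>
Acked-by: Alok Prasad <palok@marvell.com>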

> ---
>  drivers/net/ethernet/qlogic/qed/qed_debug.c | 28 +++++++++++++++------
>  1 file changed, 20 insertions(+), 8 deletions(-)

Please add a changelog here when you resend the patch.
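
For example, a version changelog placed below the "---" marker (the change
list here is only a placeholder for whatever actually changed in this resend)
is dropped automatically when the patch is applied with git am:

---
v2:
 - <describe what changed since v1 here>

 drivers/net/ethernet/qlogic/qed/qed_debug.c | 28 +++++++++++++++------
 1 file changed, 20 insertions(+), 8 deletions(-)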

Thanks

> 
> diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
> index 86ecb080b153..cdcead614e9f 100644
> --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
> +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
> @@ -1830,11 +1830,12 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
>  /* Finds the meta data image in NVRAM */
>  static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
>  					    struct qed_ptt *p_ptt,
>  					    u32 image_type,
>  					    u32 *nvram_offset_bytes,
> -					    u32 *nvram_size_bytes)
> +					    u32 *nvram_size_bytes,
> +					    bool b_can_sleep)
>  {
>  	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
>  	struct mcp_file_att file_att;
>  	int nvm_result;
>  
> @@ -1844,11 +1845,12 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
>  					DRV_MSG_CODE_NVM_GET_FILE_ATT,
>  					image_type,
>  					&ret_mcp_resp,
>  					&ret_mcp_param,
>  					&ret_txn_size,
> -					(u32 *)&file_att, false);
> +					(u32 *)&file_att,
> +					b_can_sleep);
>  
>  	/* Check response */
>  	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
>  	    FW_MSG_CODE_NVM_OK)
>  		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
> @@ -1871,11 +1873,13 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
>  
>  /* Reads data from NVRAM */
>  static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
>  				      struct qed_ptt *p_ptt,
>  				      u32 nvram_offset_bytes,
> -				      u32 nvram_size_bytes, u32 *ret_buf)
> +				      u32 nvram_size_bytes,
> +				      u32 *ret_buf,
> +				      bool b_can_sleep)
>  {
>  	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
>  	s32 bytes_left = nvram_size_bytes;
>  	u32 read_offset = 0, param = 0;
>  
> @@ -1897,11 +1901,11 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
>  		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
>  				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
>  				       &ret_mcp_resp,
>  				       &ret_mcp_param, &ret_read_size,
>  				       (u32 *)((u8 *)ret_buf + read_offset),
> -				       false))
> +				       b_can_sleep))
>  			return DBG_STATUS_NVRAM_READ_FAILED;
>  
>  		/* Check response */
>  		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
>  			return DBG_STATUS_NVRAM_READ_FAILED;
> @@ -3378,11 +3382,12 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
>  	/* Read HW dump image from NVRAM */
>  	status = qed_find_nvram_image(p_hwfn,
>  				      p_ptt,
>  				      NVM_TYPE_HW_DUMP_OUT,
>  				      &hw_dump_offset_bytes,
> -				      &hw_dump_size_bytes);
> +				      &hw_dump_size_bytes,
> +				      false);
>  	if (status != DBG_STATUS_OK)
>  		return 0;
>  
>  	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
>  
> @@ -3395,11 +3400,13 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
>  	/* Read MCP HW dump image into dump buffer */
>  	if (dump && hw_dump_size_dwords) {
>  		status = qed_nvram_read(p_hwfn,
>  					p_ptt,
>  					hw_dump_offset_bytes,
> -					hw_dump_size_bytes, dump_buf + offset);
> +					hw_dump_size_bytes,
> +					dump_buf + offset,
> +					false);
>  		if (status != DBG_STATUS_OK) {
>  			DP_NOTICE(p_hwfn,
>  				  "Failed to read MCP HW Dump image from NVRAM\n");
>  			return 0;
>  		}
> @@ -4121,11 +4128,13 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
>  	    (*running_bundle_id ==
>  	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
>  	return qed_find_nvram_image(p_hwfn,
>  				    p_ptt,
>  				    nvram_image_type,
> -				    trace_meta_offset, trace_meta_size);
> +				    trace_meta_offset,
> +				    trace_meta_size,
> +				    true);
>  }
>  
>  /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
>  static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
>  					       struct qed_ptt *p_ptt,
> @@ -4137,11 +4146,14 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
>  	u32 signature;
>  
>  	/* Read meta data from NVRAM */
>  	status = qed_nvram_read(p_hwfn,
>  				p_ptt,
> -				nvram_offset_in_bytes, size_in_bytes, buf);
> +				nvram_offset_in_bytes,
> +				size_in_bytes,
> +				buf,
> +				true);
>  	if (status != DBG_STATUS_OK)
>  		return status;
>  
>  	/* Extract and check first signature */
>  	signature = qed_read_unaligned_dword(byte_buf);
> -- 
> 2.25.1
> 

Thread overview: 8+ messages
2022-12-17 17:56 [PATCH] qed: allow sleep in qed_mcp_trace_dump() Caleb Sander
2022-12-20  9:55 ` Paolo Abeni
2022-12-21 16:48   ` Caleb Sander
2022-12-28 22:00   ` [PATCH net v2] " Caleb Sander
2022-12-29  6:52     ` Leon Romanovsky [this message]
2023-01-03 23:30     ` [PATCH net v3] " Caleb Sander
2023-01-05  4:44       ` patchwork-bot+netdevbpf
     [not found] ` <PH0PR18MB516573B4C93F1A0CC27D5818C4E49@PH0PR18MB5165.namprd18.prod.outlook.com>
2022-12-22  9:38   ` [PATCH] " Alok Prasad
