From: Yuval Mintz <Yuval.Mintz@qlogic.com>
To: <davem@davemloft.net>, <netdev@vger.kernel.org>
Cc: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Subject: [PATCH net-next 5/6] qed: Allow chance for fast ramrod completions
Date: Sun, 9 Oct 2016 18:25:37 +0300
Message-ID: <1476026738-26069-6-git-send-email-Yuval.Mintz@qlogic.com>
In-Reply-To: <1476026738-26069-1-git-send-email-Yuval.Mintz@qlogic.com>
From: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>

Whenever a ramrod is sent for some device configuration, the driver
sleeps at least 5ms between each iteration of polling for the ramrod's
completion. However, in almost every configuration scenario the
firmware is able to comply and complete the ramrod within a matter of
several usecs. This matters especially in cases where a lot of
sequential configurations are applied to the hardware [e.g., RoCE],
where the existing scheme can cause visible delays for the user.

This patch changes the completion scheme - instead of immediately
starting to sleep for a 'long' period between iterations, the driver
first polls quickly, waiting only a couple of usecs between the initial
iterations, and falls back to the longer sleeps only if the ramrod has
not completed by then.
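
For readers skimming the change, here is a minimal sketch of the
two-phase polling pattern described above. It is plain userspace C; the
constants and the poll_done()/completion_flag helpers are illustrative
placeholders rather than the qed driver's actual API - the real
implementation is in the diff below.

/* Two-phase wait: busy-poll briefly, then fall back to longer sleeps. */
#include <stdbool.h>
#include <unistd.h>

#define QUICK_POLL_ITER  10    /* short waits, catch fast completions */
#define QUICK_POLL_US    10
#define SLOW_POLL_ITER   1000  /* long waits, bound the total wait time */
#define SLOW_POLL_MS     5

static volatile bool completion_flag;  /* set from another context */

static bool poll_done(void)
{
	return completion_flag;
}

static int wait_for_completion_two_phase(void)
{
	int i;

	/* Phase 1: poll with usec waits so fast completions return quickly. */
	for (i = 0; i < QUICK_POLL_ITER; i++) {
		if (poll_done())
			return 0;
		usleep(QUICK_POLL_US);
	}

	/* Phase 2: poll with msec sleeps to avoid burning CPU on slow ones. */
	for (i = 0; i < SLOW_POLL_ITER; i++) {
		if (poll_done())
			return 0;
		usleep(SLOW_POLL_MS * 1000);
	}

	return -1;  /* timed out */
}

The quick phase covers the common case where the firmware completes
within tens of usecs, while the sleeping phase keeps a stuck ramrod
from busy-waiting the CPU for the full timeout.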
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
---
drivers/net/ethernet/qlogic/qed/qed_spq.c | 85 +++++++++++++++++++++----------
1 file changed, 59 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index caff415..259a615 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -37,7 +37,11 @@
***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
-#define SPQ_BLOCK_SLEEP_LENGTH (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
@@ -57,53 +61,81 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
smp_wmb();
}
-static int qed_spq_block(struct qed_hwfn *p_hwfn,
- struct qed_spq_entry *p_ent,
- u8 *p_fw_ret)
+static int __qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret, bool sleep_between_iter)
{
- int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
struct qed_spq_comp_done *comp_done;
- int rc;
+ u32 iter_cnt;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- while (sleep_count) {
- /* validate we receive completion update */
+ iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ : SPQ_BLOCK_DELAY_MAX_ITER;
+
+ while (iter_cnt--) {
+ /* Validate we receive completion update */
smp_rmb();
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
- usleep_range(5000, 10000);
- sleep_count--;
+
+ if (sleep_between_iter)
+ msleep(SPQ_BLOCK_SLEEP_MS);
+ else
+ udelay(SPQ_BLOCK_DELAY_US);
}
+ return -EBUSY;
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret, bool skip_quick_poll)
+{
+ struct qed_spq_comp_done *comp_done;
+ int rc;
+
+ /* A relatively short polling period w/o sleeping, to allow the FW to
+ * complete the ramrod and thus possibly to avoid the following sleeps.
+ */
+ if (!skip_quick_poll) {
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+ if (!rc)
+ return 0;
+ }
+
+ /* Move to polling with a sleeping period between iterations */
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+ return 0;
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != 0)
+ if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
+ goto err;
+ }
/* Retry after drain */
- sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
- while (sleep_count) {
- /* validate we receive completion update */
- smp_rmb();
- if (comp_done->done == 1) {
- if (p_fw_ret)
- *p_fw_ret = comp_done->fw_return_code;
- return 0;
- }
- usleep_range(5000, 10000);
- sleep_count--;
- }
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+ return 0;
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
-
- DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+err:
+ DP_NOTICE(p_hwfn,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
return -EBUSY;
}
@@ -729,7 +761,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer perform the cleanup here.
*/
- rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
+ p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) {
/* This is an allocated p_ent which does not need to
--
1.9.3