From: Julian Wiedmann <jwi@linux.ibm.com>
To: David Miller <davem@davemloft.net>, Jakub Kicinski <kuba@kernel.org>
Cc: linux-netdev <netdev@vger.kernel.org>,
linux-s390 <linux-s390@vger.kernel.org>,
Heiko Carstens <hca@linux.ibm.com>,
Karsten Graul <kgraul@linux.ibm.com>,
Julian Wiedmann <jwi@linux.ibm.com>
Subject: [PATCH net 2/4] s390/qeth: improve completion of pending TX buffers
Date: Tue, 9 Mar 2021 17:52:19 +0100 [thread overview]
Message-ID: <20210309165221.1735641-3-jwi@linux.ibm.com> (raw)
In-Reply-To: <20210309165221.1735641-1-jwi@linux.ibm.com>
The current design attaches a pending TX buffer to a custom
singly-linked list, which is anchored at the buffer's slot on the
TX ring. The buffer is then checked for final completion whenever
this slot is processed during a subsequent TX NAPI poll cycle.
But if there's insufficient traffic on the ring, we might never make
enough progress to get back to this ring slot and discover the pending
buffer's final TX completion. This is particularly bad when the missing
TX completion is what blocks the application from sending further
traffic.
So convert the custom singly-linked list code to a per-queue list_head,
and scan this list on every TX NAPI poll cycle.
Fixes: 0da9581ddb0f ("qeth: exploit asynchronous delivery of storage blocks")
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
---
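For illustration, here is a minimal userspace sketch of the pattern this
patch introduces. All names below are stand-ins, not the actual qeth
symbols, and the list helpers are hand-rolled imitations of the kernel's
<linux/list.h> API: pending buffers sit on a per-queue list, and every
poll cycle scans that whole list, so a completion is never missed just
because its ring slot is not revisited.

/*
 * Minimal userspace sketch of the per-queue pending list pattern.
 * The list helpers mimic the kernel's <linux/list.h> API; all names
 * here are illustrative stand-ins, not the actual qeth code.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum buf_state { BUF_PENDING, BUF_EMPTY };

struct tx_buf {
	int idx;
	enum buf_state state;
	struct list_head list_entry;	/* links the buffer into the queue's pending list */
};

struct tx_queue {
	struct list_head pending_bufs;	/* per-queue list, scanned on every poll */
};

/* Called on every TX poll cycle: complete finished buffers in any order. */
static void tx_complete_pending_bufs(struct tx_queue *q, int drain)
{
	struct list_head *pos = q->pending_bufs.next;

	while (pos != &q->pending_bufs) {
		struct tx_buf *buf = container_of(pos, struct tx_buf, list_entry);

		pos = pos->next;	/* save next before a potential free */
		if (drain || buf->state == BUF_EMPTY) {
			printf("completing buffer %d\n", buf->idx);
			list_del(&buf->list_entry);
			free(buf);
		}
	}
}

int main(void)
{
	struct tx_queue q;
	int i;

	INIT_LIST_HEAD(&q.pending_bufs);

	/* Park three buffers as pending; pretend buffer 1 has already finished. */
	for (i = 0; i < 3; i++) {
		struct tx_buf *buf = malloc(sizeof(*buf));

		buf->idx = i;
		buf->state = (i == 1) ? BUF_EMPTY : BUF_PENDING;
		list_add(&buf->list_entry, &q.pending_bufs);
	}

	tx_complete_pending_bufs(&q, 0);	/* completes only buffer 1 */
	tx_complete_pending_bufs(&q, 1);	/* drain: completes buffers 2 and 0 */
	return 0;
}

The driver itself uses list_for_each_entry_safe() for the same
save-next-before-free traversal, as shown in
qeth_tx_complete_pending_bufs() in the diff below.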
drivers/s390/net/qeth_core.h | 3 +-
drivers/s390/net/qeth_core_main.c | 69 +++++++++++++------------------
2 files changed, 30 insertions(+), 42 deletions(-)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index a1da83b0b0ef..91acff493612 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -436,7 +436,7 @@ struct qeth_qdio_out_buffer {
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
struct qeth_qdio_out_q *q;
- struct qeth_qdio_out_buffer *next_pending;
+ struct list_head list_entry;
};
struct qeth_card;
@@ -500,6 +500,7 @@ struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
+ struct list_head pending_bufs;
struct qeth_out_q_stats stats;
spinlock_t lock;
unsigned int priority;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f7bc0ca6909b..3763cd6d14f8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,8 +73,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
- int budget);
static void qeth_close_dev_handler(struct work_struct *work)
{
@@ -465,41 +463,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
-static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
- int forced_cleanup)
-{
- if (q->card->options.cq != QETH_CQ_ENABLED)
- return;
-
- if (q->bufs[bidx]->next_pending != NULL) {
- struct qeth_qdio_out_buffer *head = q->bufs[bidx];
- struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
-
- while (c) {
- if (forced_cleanup ||
- atomic_read(&c->state) == QETH_QDIO_BUF_EMPTY) {
- struct qeth_qdio_out_buffer *f = c;
-
- QETH_CARD_TEXT(f->q->card, 5, "fp");
- QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
- /* release here to avoid interleaving between
- outbound tasklet and inbound tasklet
- regarding notifications and lifecycle */
- qeth_tx_complete_buf(c, forced_cleanup, 0);
-
- c = f->next_pending;
- WARN_ON_ONCE(head->next_pending != f);
- head->next_pending = c;
- kmem_cache_free(qeth_qdio_outbuf_cache, f);
- } else {
- head = c;
- c = c->next_pending;
- }
-
- }
- }
-}
-
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
{
@@ -537,7 +500,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
qeth_notify_skbs(buffer->q, buffer, notification);
/* Free dangling allocations. The attached skbs are handled by
- * qeth_cleanup_handled_pending().
+ * qeth_tx_complete_pending_bufs().
*/
for (i = 0;
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
@@ -1488,14 +1451,35 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
+static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue,
+ bool drain)
+{
+ struct qeth_qdio_out_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
+ if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
+ QETH_CARD_TEXT(card, 5, "fp");
+ QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
+
+ qeth_tx_complete_buf(buf, drain, 0);
+
+ list_del(&buf->list_entry);
+ kmem_cache_free(qeth_qdio_outbuf_cache, buf);
+ }
+ }
+}
+
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
+ qeth_tx_complete_pending_bufs(q->card, q, true);
+
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
- qeth_cleanup_handled_pending(q, j, 1);
+
qeth_clear_output_buffer(q, q->bufs[j], true, 0);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -2615,7 +2599,6 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->q = q;
- newbuf->next_pending = q->bufs[bidx];
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
return 0;
@@ -2697,6 +2680,7 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
card->qdio.out_qs[i] = queue;
queue->card = card;
queue->queue_no = i;
+ INIT_LIST_HEAD(&queue->pending_bufs);
spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
@@ -6106,6 +6090,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
qeth_schedule_recovery(card);
}
+ list_add(&buffer->list_entry,
+ &queue->pending_bufs);
/* Skip clearing the buffer: */
return;
case QETH_QDIO_BUF_QAOB_OK:
@@ -6161,6 +6147,8 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int bytes = 0;
int completed;
+ qeth_tx_complete_pending_bufs(card, queue, false);
+
if (qeth_out_queue_is_empty(queue)) {
napi_complete(napi);
return 0;
@@ -6193,7 +6181,6 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
qeth_handle_send_error(card, buffer, error);
qeth_iqd_tx_complete(queue, bidx, error, budget);
- qeth_cleanup_handled_pending(queue, bidx, false);
}
netdev_tx_completed_queue(txq, packets, bytes);
--
2.25.1