From: Julian Wiedmann <jwi@linux.ibm.com>
To: David Miller <davem@davemloft.net>
Cc: <netdev@vger.kernel.org>, <linux-s390@vger.kernel.org>,
Heiko Carstens <heiko.carstens@de.ibm.com>,
Stefan Raspl <raspl@linux.ibm.com>,
Ursula Braun <ubraun@linux.ibm.com>,
Julian Wiedmann <jwi@linux.ibm.com>
Subject: [PATCH net-next 2/8] s390/qeth: use IQD Multi-Write
Date: Thu, 31 Oct 2019 13:42:15 +0100
Message-ID: <20191031124221.34028-3-jwi@linux.ibm.com>
In-Reply-To: <20191031124221.34028-1-jwi@linux.ibm.com>

For IQD devices with Multi-Write support, we can defer the queue-flush
further and transmit multiple IO buffers with a single TX doorbell.
The same-target restriction still applies.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Alexandra Winter <wintera@linux.ibm.com>
---
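Note (not part of the patch): a rough standalone sketch of the bulking idea
for readers unfamiliar with the QDIO code. The queue collects primed buffers
in a window described by bulk_start/bulk_count and rings a single doorbell
for the whole window once bulk_count reaches bulk_max, or earlier when the
next packet may not share the bulk, cf. qeth_iqd_may_bulk(). All names and
types below are simplified stand-ins, and "one buffer per packet" is an
assumption that glosses over the real element-fill logic.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	128	/* stands in for QDIO_MAX_BUFFERS_PER_Q */
#define RING_WRAP(i)	((i) % RING_SIZE)

struct toy_txq {
	unsigned int bulk_start;	/* first buffer of the pending bulk */
	unsigned int bulk_count;	/* primed buffers not yet flushed */
	unsigned int bulk_max;		/* limit derived from the device's mmwc */
};

/* Hand all primed buffers to the device with a single doorbell. */
static void toy_flush(struct toy_txq *q)
{
	printf("doorbell: buffers %u..%u\n", q->bulk_start,
	       RING_WRAP(q->bulk_start + q->bulk_count - 1));
	q->bulk_start = RING_WRAP(q->bulk_start + q->bulk_count);
	q->bulk_count = 0;
}

/*
 * Queue one packet; 'same_target' mimics the qeth_iqd_may_bulk() check
 * (only packets for the same destination may share a bulk).
 */
static void toy_xmit(struct toy_txq *q, bool same_target)
{
	if (!same_target && q->bulk_count)
		toy_flush(q);		/* close the current bulk first */

	q->bulk_count++;		/* prime one more buffer */

	if (q->bulk_count >= q->bulk_max)
		toy_flush(q);		/* bulk is full, ring the doorbell */
}

int main(void)
{
	struct toy_txq q = { .bulk_max = 4 };

	toy_xmit(&q, true);
	toy_xmit(&q, true);
	toy_xmit(&q, false);	/* new target: flushes the first two buffers */
	toy_xmit(&q, true);
	toy_xmit(&q, true);
	toy_xmit(&q, true);	/* reaches bulk_max: flushes four buffers at once */
	return 0;
}
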
drivers/s390/net/qeth_core.h | 9 ++++++
drivers/s390/net/qeth_core_main.c | 54 +++++++++++++++++++++++++------
2 files changed, 53 insertions(+), 10 deletions(-)

diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e4b55f9aa062..d08154502b15 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -532,6 +532,8 @@ struct qeth_qdio_out_q {
struct timer_list timer;
struct qeth_hdr *prev_hdr;
u8 bulk_start;
+ u8 bulk_count;
+ u8 bulk_max;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -878,6 +880,13 @@ static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
return txq;
}
+static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
+ QETH_IQD_MCAST_TXQ;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dda274351c21..62054afd73cc 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2634,6 +2634,18 @@ static int qeth_init_input_buffer(struct qeth_card *card,
return 0;
}
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ if (!IS_IQD(card) ||
+ qeth_iqd_is_mcast_queue(card, queue) ||
+ card->options.cq == QETH_CQ_ENABLED ||
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
+ return 1;
+
+ return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
+}
+
int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
@@ -2673,6 +2685,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
queue->do_pack = 0;
queue->prev_hdr = NULL;
queue->bulk_start = 0;
+ queue->bulk_count = 0;
+ queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3318,10 +3332,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
- qeth_flush_buffers(queue, queue->bulk_start, 1);
+ qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
- queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
queue->prev_hdr = NULL;
+ queue->bulk_count = 0;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3680,10 +3695,10 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
}
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buffer,
struct sk_buff *curr_skb,
struct qeth_hdr *curr_hdr)
{
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
struct qeth_hdr *prev_hdr = queue->prev_hdr;
if (!prev_hdr)
@@ -3803,13 +3818,14 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len)
{
- struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
unsigned int bytes = qdisc_pkt_len(skb);
+ struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
bool flush;
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* Just a sanity check, the wake/stop logic should ensure that we always
@@ -3818,11 +3834,23 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
- !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
- atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
- buffer = queue->bufs[queue->bulk_start];
+ flush = !qeth_iqd_may_bulk(queue, skb, hdr);
+
+ if (flush ||
+ (buffer->next_element_to_fill + elements > queue->max_elements)) {
+ if (buffer->next_element_to_fill > 0) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ queue->bulk_count++;
+ }
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
+
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
+ queue->bulk_count)];
/* Sanity-check again: */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
@@ -3848,7 +3876,13 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (flush || next_element >= queue->max_elements) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
+ queue->bulk_count++;
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
}
if (stopped && !qeth_out_queue_is_full(queue))
--
2.17.1