From: Arthur Kiyanovski <akiyano@amazon.com>
To: David Miller <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, <netdev@vger.kernel.org>
Cc: Arthur Kiyanovski <akiyano@amazon.com>,
"Woodhouse, David" <dwmw@amazon.com>,
"Machulsky, Zorik" <zorik@amazon.com>,
"Matushevsky, Alexander" <matua@amazon.com>,
Saeed Bshara <saeedb@amazon.com>, "Wilson, Matt" <msw@amazon.com>,
"Liguori, Anthony" <aliguori@amazon.com>,
"Bshara, Nafea" <nafea@amazon.com>,
"Belgazal, Netanel" <netanel@amazon.com>,
"Saidi, Ali" <alisaidi@amazon.com>,
"Herrenschmidt, Benjamin" <benh@amazon.com>,
"Dagan, Noam" <ndagan@amazon.com>,
"Agroskin, Shay" <shayagr@amazon.com>,
"Arinzon, David" <darinzon@amazon.com>
Subject: [PATCH V2 net-next 07/10] net: ena: Remove ena_calc_queue_size_ctx struct
Date: Fri, 7 Jan 2022 20:23:43 +0000
Message-ID: <20220107202346.3522-8-akiyano@amazon.com>
In-Reply-To: <20220107202346.3522-1-akiyano@amazon.com>
This struct was used only to pass data from a callee function back to its
caller, which can be avoided by writing the results directly into the
adapter struct.
Removing it results in less code without harming readability, and it
allows the ring size calculation to be consolidated into a single
function (ena_calc_io_queue_size()).
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
---
drivers/net/ethernet/amazon/ena/ena_netdev.c | 49 ++++++++------------
drivers/net/ethernet/amazon/ena/ena_netdev.h | 12 -----
2 files changed, 19 insertions(+), 42 deletions(-)
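
For reviewers, a condensed sketch of the calling-convention change (not a
literal excerpt; all names are taken from the diff below). Before,
ena_probe() filled an intermediate context struct and then copied the
results into the adapter:

	struct ena_calc_queue_size_ctx calc_queue_ctx = {};

	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
	calc_queue_ctx.pdev = pdev;
	ena_calc_io_queue_size(&calc_queue_ctx);
	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
	/* ...and four more copy-backs for the max ring and SGL sizes */

After, the helper writes the results straight into the adapter, so both
the struct and the copy-backs go away:

	ena_calc_io_queue_size(adapter, &get_feat_ctx);
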
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b4e10f7082e2..4ad0c602d76c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4146,10 +4146,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
}
-static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
- struct ena_com_dev *ena_dev = ctx->ena_dev;
+ struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 max_tx_queue_size;
@@ -4157,7 +4158,7 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
- &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
max_queue_ext->max_rx_sq_depth);
max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
@@ -4169,13 +4170,13 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queue_ext->max_tx_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
} else {
struct ena_admin_queue_feature_desc *max_queues =
- &ctx->get_feat_ctx->max_queues;
+ &get_feat_ctx->max_queues;
max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
max_queues->max_sq_depth);
max_tx_queue_size = max_queues->max_cq_depth;
@@ -4187,10 +4188,10 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queues->max_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
}
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
@@ -4204,10 +4205,10 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
tx_queue_size = rounddown_pow_of_two(tx_queue_size);
rx_queue_size = rounddown_pow_of_two(rx_queue_size);
- ctx->max_tx_queue_size = max_tx_queue_size;
- ctx->max_rx_queue_size = max_rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
- ctx->rx_queue_size = rx_queue_size;
+ adapter->max_tx_ring_size = max_tx_queue_size;
+ adapter->max_rx_ring_size = max_rx_queue_size;
+ adapter->requested_tx_ring_size = tx_queue_size;
+ adapter->requested_rx_ring_size = rx_queue_size;
}
/* ena_probe - Device Initialization Routine
@@ -4222,7 +4223,6 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4307,10 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- calc_queue_ctx.ena_dev = ena_dev;
- calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- calc_queue_ctx.pdev = pdev;
-
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
@@ -4318,7 +4314,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- ena_calc_io_queue_size(&calc_queue_ctx);
+ ena_calc_io_queue_size(adapter, &get_feat_ctx);
if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
@@ -4328,13 +4324,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
- adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
- adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
- adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
- adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
-
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index f70f1242e5b5..25b9d4dd0535 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -140,18 +140,6 @@ struct ena_napi {
struct dim dim;
};
-struct ena_calc_queue_size_ctx {
- struct ena_com_dev_get_features_ctx *get_feat_ctx;
- struct ena_com_dev *ena_dev;
- struct pci_dev *pdev;
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
- u16 max_tx_sgl_size;
- u16 max_rx_sgl_size;
-};
-
struct ena_tx_buffer {
struct sk_buff *skb;
/* num of ena desc for this specific skb
--
2.32.0