From: Shay Agroskin <shayagr@amazon.com>
To: David Miller <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, <netdev@vger.kernel.org>
Cc: David Arinzon <darinzon@amazon.com>,
"Woodhouse, David" <dwmw@amazon.com>,
"Machulsky, Zorik" <zorik@amazon.com>,
"Matushevsky, Alexander" <matua@amazon.com>,
Saeed Bshara <saeedb@amazon.com>, "Wilson, Matt" <msw@amazon.com>,
"Liguori, Anthony" <aliguori@amazon.com>,
"Bshara, Nafea" <nafea@amazon.com>,
"Belgazal, Netanel" <netanel@amazon.com>,
"Saidi, Ali" <alisaidi@amazon.com>,
"Herrenschmidt, Benjamin" <benh@amazon.com>,
"Kiyanovski, Arthur" <akiyano@amazon.com>,
"Dagan, Noam" <ndagan@amazon.com>,
"Agroskin, Shay" <shayagr@amazon.com>,
"Itzko, Shahar" <itzko@amazon.com>,
"Abboud, Osama" <osamaabb@amazon.com>
Subject: [PATCH v5 net-next 3/6] net: ena: Add an option to configure large LLQ headers
Date: Thu, 16 Mar 2023 16:27:03 +0200
Message-ID: <20230316142706.4046263-4-shayagr@amazon.com>
In-Reply-To: <20230316142706.4046263-1-shayagr@amazon.com>
From: David Arinzon <darinzon@amazon.com>
Allow configuring the device with large LLQ headers. The Low Latency
Queue (LLQ) allows the driver to write the first N bytes of the packet,
along with the rest of the TX descriptors, directly into device memory
(N is 96 bytes by default and 224 bytes with the large LLQ headers
configuration).
Having the L4 (TCP/UDP) headers contained within these first N bytes is
required to get maximum performance from the device; packets whose
headers do not fit into 96 bytes benefit from the larger setting.
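For context, the 96/224 figures fall out of the LLQ entry layout: each
entry holds the TX descriptors first and the pushed header bytes after
them. A minimal standalone sketch of that arithmetic, assuming 16-byte
TX descriptors and the two-descriptors-before-header setting chosen in
this patch (the helper name is hypothetical, not part of the driver):

#include <stdio.h>

/* Assumed values for illustration; the driver reads the real ones from
 * the device (struct ena_admin_feature_llq_desc) and its descriptor
 * format.
 */
#define TX_DESC_SIZE         16 /* assumed size of one TX descriptor, bytes */
#define DESCS_BEFORE_HEADER   2 /* ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 */

/* Header bytes left in one LLQ entry after the leading descriptors */
static unsigned int llq_push_header_size(unsigned int entry_size)
{
	return entry_size - DESCS_BEFORE_HEADER * TX_DESC_SIZE;
}

int main(void)
{
	printf("128B entry -> %u header bytes\n", llq_push_header_size(128)); /* 96 */
	printf("256B entry -> %u header bytes\n", llq_push_header_size(256)); /* 224 */
	return 0;
}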
Signed-off-by: David Arinzon <darinzon@amazon.com>
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
---
drivers/net/ethernet/amazon/ena/ena_netdev.c | 90 +++++++++++++++-----
drivers/net/ethernet/amazon/ena/ena_netdev.h | 8 ++
2 files changed, 76 insertions(+), 22 deletions(-)
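Reviewer note: the new logic in the ena_calc_io_queue_size() hunk below
halves the maximum TX queue depth when 256-byte entries are forced, so
the total memory backing the queues stays unchanged. A small
sanity-check sketch of that invariant (helper name hypothetical):

#include <assert.h>

/* Mirrors the adjustment in the ena_calc_io_queue_size() hunk below:
 * doubling the entry size is paired with halving the queue depth.
 */
static unsigned int halve_for_large_llq(unsigned int max_queue_size)
{
	return max_queue_size / 2;
}

int main(void)
{
	unsigned int depth_128b = 1024; /* example depth with 128B entries */
	unsigned int depth_256b = halve_for_large_llq(depth_128b);

	/* total queue memory is unchanged: 128 * 1024 == 256 * 512 */
	assert(128u * depth_128b == 256u * depth_256b);
	return 0;
}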
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 372e33831323..b97ecae439b2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3374,6 +3374,15 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
u32 max_tx_queue_size;
u32 max_rx_queue_size;
+ /* If this function is called after driver load, the ring sizes have already
+ * been configured. Take them into account when recalculating the ring sizes.
+ */
+ if (adapter->tx_ring->ring_size)
+ tx_queue_size = adapter->tx_ring->ring_size;
+
+ if (adapter->rx_ring->ring_size)
+ rx_queue_size = adapter->rx_ring->ring_size;
+
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&get_feat_ctx->max_queue_ext.max_queue_ext;
@@ -3415,6 +3424,24 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+ /* When forcing large headers, we multiply the entry size by 2, and therefore divide
+ * the queue size by 2, leaving the amount of memory used by the queues unchanged.
+ */
+ if (adapter->large_llq_header_enabled) {
+ if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+ ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ max_tx_queue_size /= 2;
+ dev_info(&adapter->pdev->dev,
+ "Forcing large headers and decreasing maximum TX queue size to %d\n",
+ max_tx_queue_size);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+
+ adapter->large_llq_header_enabled = false;
+ }
+ }
+
tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
max_tx_queue_size);
rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
@@ -3452,13 +3479,30 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
return 0;
}
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_adapter *adapter,
+ struct ena_llq_configurations *llq_config,
+ struct ena_admin_feature_llq_desc *llq)
{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+
llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
- llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
- llq_config->llq_ring_entry_size_value = 128;
+
+ adapter->large_llq_header_supported =
+ !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
+ adapter->large_llq_header_supported &=
+ !!(llq->entry_size_ctrl_supported &
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B);
+
+ if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+ adapter->large_llq_header_enabled) {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+ llq_config->llq_ring_entry_size_value = 256;
+ } else {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_ring_entry_size_value = 128;
+ }
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3477,6 +3521,13 @@ static int ena_set_queues_placement_policy(struct pci_dev *pdev,
return 0;
}
+ if (!ena_dev->mem_bar) {
+ netdev_err(ena_dev->net_device,
+ "LLQ is advertised as supported but device doesn't expose mem bar\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
if (unlikely(rc)) {
dev_err(&pdev->dev,
@@ -3492,15 +3543,8 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
{
bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
- if (!has_mem_bar) {
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- dev_err(&pdev->dev,
- "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- }
-
+ if (!has_mem_bar)
return 0;
- }
ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
pci_resource_start(pdev, ENA_MEM_BAR),
@@ -3512,10 +3556,11 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
return 0;
}
-static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
struct ena_com_dev_get_features_ctx *get_feat_ctx,
bool *wd_state)
{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3600,7 +3645,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
- set_default_llq_configurations(&llq_config);
+ set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
@@ -3609,6 +3654,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
goto err_admin_init;
}
+ ena_calc_io_queue_size(adapter, get_feat_ctx);
+
return 0;
err_admin_init:
@@ -3707,7 +3754,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
int rc;
set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
- rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
+ rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
goto err;
@@ -4311,18 +4358,18 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
+ rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
- dev_err(&pdev->dev, "ENA device init failed\n");
- if (rc == -ETIME)
- rc = -EPROBE_DEFER;
+ dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
goto err_netdev_destroy;
}
- rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
+ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
if (rc) {
- dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
- goto err_device_destroy;
+ dev_err(&pdev->dev, "ENA device init failed\n");
+ if (rc == -ETIME)
+ rc = -EPROBE_DEFER;
+ goto err_netdev_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4332,7 +4379,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- ena_calc_io_queue_size(adapter, &get_feat_ctx);
if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 2cb141079474..3e8c4a66c7d8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -334,6 +334,14 @@ struct ena_adapter {
u32 msg_enable;
+ /* large_llq_header_enabled is used for two purposes:
+ * 1. Indicates that large LLQ has been requested.
+ * 2. Indicates whether large LLQ is set or not after device
+ * initialization / configuration.
+ */
+ bool large_llq_header_enabled;
+ bool large_llq_header_supported;
+
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
--
2.25.1