From: Halil Pasic <pasic@linux.ibm.com>
To: Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Simon Horman <horms@kernel.org>,
"D. Wythe" <alibuda@linux.alibaba.com>,
Dust Li <dust.li@linux.alibaba.com>,
Sidraya Jayagond <sidraya@linux.ibm.com>,
Wenjia Zhang <wenjia@linux.ibm.com>,
Mahanta Jambigi <mjambigi@linux.ibm.com>,
Tony Lu <tonylu@linux.alibaba.com>,
Wen Gu <guwen@linux.alibaba.com>,
netdev@vger.kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-rdma@vger.kernel.org,
linux-s390@vger.kernel.org
Cc: Halil Pasic <pasic@linux.ibm.com>
Subject: [PATCH net-next v3 2/2] net/smc: handle -ENOMEM from smc_wr_alloc_link_mem gracefully
Date: Sun, 21 Sep 2025 23:44:40 +0200
Message-ID: <20250921214440.325325-3-pasic@linux.ibm.com>
In-Reply-To: <20250921214440.325325-1-pasic@linux.ibm.com>
Currently a -ENOMEM from smc_wr_alloc_link_mem() is handled by giving
up and falling back to TCP. This was reasonable as long as the sizes
of the allocations there were compile-time constants and reasonably
small. But now those are configurable.

So instead of giving up immediately, keep retrying with half of the
previously requested buffer counts until the allocation either succeeds
or we dip below the old static sizes -- then give up!
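For example (illustrative values): with smcr_max_send_wr = 256 and
smcr_max_recv_wr = 768, a failed allocation is retried with 128/384,
64/192, 32/96 and finally 16/48 buffers; only once the counts would
drop below the old constants (16 send, 48 recv) do we give up and
fall back to TCP.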
Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
Reviewed-by: Mahanta Jambigi <mjambigi@linux.ibm.com>
---
Documentation/networking/smc-sysctl.rst | 8 ++++--
net/smc/smc_core.c | 34 +++++++++++++++++--------
net/smc/smc_core.h | 2 ++
net/smc/smc_wr.c | 28 ++++++++++----------
4 files changed, 46 insertions(+), 26 deletions(-)
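Note for reviewers: the retry path is easiest to provoke by raising the
WR counts far beyond what a physically contiguous allocation can satisfy
under memory fragmentation, e.g. (illustrative values only; sysctl names
as introduced by patch 1/2 of this series):

  sysctl -w net.smc.smcr_max_send_wr=2048
  sysctl -w net.smc.smcr_max_recv_wr=6144

With this patch the link should then come up with halved buffer counts
instead of falling back to TCP.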
diff --git a/Documentation/networking/smc-sysctl.rst b/Documentation/networking/smc-sysctl.rst
index c94d750c7c84..c8dbe7ac8bdf 100644
--- a/Documentation/networking/smc-sysctl.rst
+++ b/Documentation/networking/smc-sysctl.rst
@@ -85,7 +85,9 @@ smcr_max_send_wr - INTEGER
Please be aware that all the buffers need to be allocated as a physically
contiguous array in which each element is a single buffer and has the size
- of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails we give up much
+ of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails, we keep retrying
+ with half of the buffer count until the allocation either succeeds or
+ (unlikely) we dip below the old hard-coded value of 16, where we give up much
like before having this control.
Default: 16
@@ -103,7 +105,9 @@ smcr_max_recv_wr - INTEGER
Please be aware that all the buffers need to be allocated as a physically
contiguous array in which each element is a single buffer and has the size
- of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails we give up much
+ of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails, we keep retrying
+ with half of the buffer count until the allocation either succeeds or
+ (unlikely) we dip below the old hard-coded value of 48, where we give up much
like before having this control.
Default: 48
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2a559a98541c..f8131b4dfcd6 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -810,6 +810,8 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
lnk->clearing = 0;
lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
lnk->link_id = smcr_next_link_id(lgr);
+ lnk->max_send_wr = lgr->max_send_wr;
+ lnk->max_recv_wr = lgr->max_recv_wr;
lnk->lgr = lgr;
smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
lnk->link_idx = link_idx;
@@ -836,27 +838,39 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
rc = smc_llc_link_init(lnk);
if (rc)
goto out;
- rc = smc_wr_alloc_link_mem(lnk);
- if (rc)
- goto clear_llc_lnk;
rc = smc_ib_create_protection_domain(lnk);
if (rc)
- goto free_link_mem;
- rc = smc_ib_create_queue_pair(lnk);
- if (rc)
- goto dealloc_pd;
+ goto clear_llc_lnk;
+ do {
+ rc = smc_ib_create_queue_pair(lnk);
+ if (rc)
+ goto dealloc_pd;
+ rc = smc_wr_alloc_link_mem(lnk);
+ if (!rc)
+ break;
+ if (rc != -ENOMEM) /* any other error: give up */
+ goto destroy_qp;
+ /* retry with smaller ... */
+ lnk->max_send_wr /= 2;
+ lnk->max_recv_wr /= 2;
+ /* ... unless dropping below the old hard-coded counts */
+ if (lnk->max_send_wr < 16 || lnk->max_recv_wr < 48)
+ goto destroy_qp;
+ smc_ib_destroy_queue_pair(lnk);
+ } while (1);
+
rc = smc_wr_create_link(lnk);
if (rc)
- goto destroy_qp;
+ goto free_link_mem;
lnk->state = SMC_LNK_ACTIVATING;
return 0;
+free_link_mem:
+ smc_wr_free_link_mem(lnk);
destroy_qp:
smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
smc_ib_dealloc_protection_domain(lnk);
-free_link_mem:
- smc_wr_free_link_mem(lnk);
clear_llc_lnk:
smc_llc_link_clear(lnk, false);
out:
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index ab2d15929cb2..b9addf633d8a 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -174,6 +174,8 @@ struct smc_link {
struct completion llc_testlink_resp; /* wait for rx of testlink */
int llc_testlink_time; /* testlink interval */
atomic_t conn_cnt; /* connections on this link */
+ u16 max_send_wr;
+ u16 max_recv_wr;
};
/* For now we just allow one parallel link per link group. The SMC protocol
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index f5b2772414fd..6b14711f0c93 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -548,9 +548,9 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk)
IB_QP_DEST_QPN,
&init_attr);
- lnk->wr_tx_cnt = min_t(size_t, lnk->lgr->max_send_wr,
+ lnk->wr_tx_cnt = min_t(size_t, lnk->max_send_wr,
lnk->qp_attr.cap.max_send_wr);
- lnk->wr_rx_cnt = min_t(size_t, lnk->lgr->max_recv_wr,
+ lnk->wr_rx_cnt = min_t(size_t, lnk->max_recv_wr,
lnk->qp_attr.cap.max_recv_wr);
}
@@ -742,51 +742,51 @@ int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr)
int smc_wr_alloc_link_mem(struct smc_link *link)
{
/* allocate link related memory */
- link->wr_tx_bufs = kcalloc(link->lgr->max_send_wr,
+ link->wr_tx_bufs = kcalloc(link->max_send_wr,
SMC_WR_BUF_SIZE, GFP_KERNEL);
if (!link->wr_tx_bufs)
goto no_mem;
- link->wr_rx_bufs = kcalloc(link->lgr->max_recv_wr, link->wr_rx_buflen,
+ link->wr_rx_bufs = kcalloc(link->max_recv_wr, link->wr_rx_buflen,
GFP_KERNEL);
if (!link->wr_rx_bufs)
goto no_mem_wr_tx_bufs;
- link->wr_tx_ibs = kcalloc(link->lgr->max_send_wr,
+ link->wr_tx_ibs = kcalloc(link->max_send_wr,
sizeof(link->wr_tx_ibs[0]), GFP_KERNEL);
if (!link->wr_tx_ibs)
goto no_mem_wr_rx_bufs;
- link->wr_rx_ibs = kcalloc(link->lgr->max_recv_wr,
+ link->wr_rx_ibs = kcalloc(link->max_recv_wr,
sizeof(link->wr_rx_ibs[0]),
GFP_KERNEL);
if (!link->wr_rx_ibs)
goto no_mem_wr_tx_ibs;
- link->wr_tx_rdmas = kcalloc(link->lgr->max_send_wr,
+ link->wr_tx_rdmas = kcalloc(link->max_send_wr,
sizeof(link->wr_tx_rdmas[0]),
GFP_KERNEL);
if (!link->wr_tx_rdmas)
goto no_mem_wr_rx_ibs;
- link->wr_tx_rdma_sges = kcalloc(link->lgr->max_send_wr,
+ link->wr_tx_rdma_sges = kcalloc(link->max_send_wr,
sizeof(link->wr_tx_rdma_sges[0]),
GFP_KERNEL);
if (!link->wr_tx_rdma_sges)
goto no_mem_wr_tx_rdmas;
- link->wr_tx_sges = kcalloc(link->lgr->max_send_wr, sizeof(link->wr_tx_sges[0]),
+ link->wr_tx_sges = kcalloc(link->max_send_wr, sizeof(link->wr_tx_sges[0]),
GFP_KERNEL);
if (!link->wr_tx_sges)
goto no_mem_wr_tx_rdma_sges;
- link->wr_rx_sges = kcalloc(link->lgr->max_recv_wr,
+ link->wr_rx_sges = kcalloc(link->max_recv_wr,
sizeof(link->wr_rx_sges[0]) * link->wr_rx_sge_cnt,
GFP_KERNEL);
if (!link->wr_rx_sges)
goto no_mem_wr_tx_sges;
- link->wr_tx_mask = bitmap_zalloc(link->lgr->max_send_wr, GFP_KERNEL);
+ link->wr_tx_mask = bitmap_zalloc(link->max_send_wr, GFP_KERNEL);
if (!link->wr_tx_mask)
goto no_mem_wr_rx_sges;
- link->wr_tx_pends = kcalloc(link->lgr->max_send_wr,
+ link->wr_tx_pends = kcalloc(link->max_send_wr,
sizeof(link->wr_tx_pends[0]),
GFP_KERNEL);
if (!link->wr_tx_pends)
goto no_mem_wr_tx_mask;
- link->wr_tx_compl = kcalloc(link->lgr->max_send_wr,
+ link->wr_tx_compl = kcalloc(link->max_send_wr,
sizeof(link->wr_tx_compl[0]),
GFP_KERNEL);
if (!link->wr_tx_compl)
@@ -907,7 +907,7 @@ int smc_wr_create_link(struct smc_link *lnk)
goto dma_unmap;
}
smc_wr_init_sge(lnk);
- bitmap_zero(lnk->wr_tx_mask, lnk->lgr->max_send_wr);
+ bitmap_zero(lnk->wr_tx_mask, lnk->max_send_wr);
init_waitqueue_head(&lnk->wr_tx_wait);
rc = percpu_ref_init(&lnk->wr_tx_refs, smcr_wr_tx_refs_free, 0, GFP_KERNEL);
if (rc)
--
2.48.1
Thread overview: 21+ messages
2025-09-21 21:44 [PATCH net-next v3 0/2] net/smc: make wr buffer count configurable Halil Pasic
2025-09-21 21:44 ` [PATCH net-next v3 1/2] " Halil Pasic
2025-09-24 17:27 ` Sidraya Jayagond
2025-09-25 9:27 ` Paolo Abeni
2025-09-25 11:25 ` Halil Pasic
2025-09-27 22:55 ` Halil Pasic
2025-09-28 2:02 ` Dust Li
2025-09-28 2:12 ` Dust Li
2025-09-28 8:39 ` Halil Pasic
2025-09-28 11:42 ` Dust Li
2025-09-28 18:32 ` Halil Pasic
2025-09-26 2:44 ` Guangguan Wang
2025-09-26 10:12 ` Halil Pasic
2025-09-26 10:30 ` Halil Pasic
2025-09-28 3:05 ` Guangguan Wang
2025-09-21 21:44 ` Halil Pasic [this message]
2025-09-24 17:28 ` [PATCH net-next v3 2/2] net/smc: handle -ENOMEM from smc_wr_alloc_link_mem gracefully Sidraya Jayagond
2025-09-25 9:40 ` Paolo Abeni
2025-09-25 15:05 ` Halil Pasic
2025-09-25 15:41 ` Paolo Abeni
2025-09-25 21:46 ` Halil Pasic