From: Tony Lu <tonylu@linux.alibaba.com>
To: "D. Wythe" <alibuda@linux.alibaba.com>
Cc: kgraul@linux.ibm.com, wenjia@linux.ibm.com, kuba@kernel.org,
davem@davemloft.net, netdev@vger.kernel.org,
linux-s390@vger.kernel.org, linux-rdma@vger.kernel.org
Subject: Re: [PATCH net-next 08/10] net/smc: replace mutex rmbs_lock and sndbufs_lock with rw_semaphore
Date: Tue, 16 Aug 2022 16:37:45 +0800 [thread overview]
Message-ID: <YvtXWfnWTP5P9ePT@TonyMac-Alibaba> (raw)
In-Reply-To: <b4e23c1ef29d567661de46a79c00e48a01344366.1660152975.git.alibuda@linux.alibaba.com>
On Thu, Aug 11, 2022 at 01:47:39AM +0800, D. Wythe wrote:
> From: "D. Wythe" <alibuda@linux.alibaba.com>
>
> It's clear that rmbs_lock and sndbufs_lock are aims to protect the
> rmbs list or the sndbufs list.
>
> During conenction establieshment, smc_buf_get_slot() will always
conenction establieshment -> connection establishment
> be invoke, and it only performs read semantics in rmbs list and
invoke -> invoked.
> sndbufs list.
>
> Based on the above considerations, we replace mutex with rw_semaphore.
> Only smc_buf_get_slot() use down_read() to allow smc_buf_get_slot()
> run concurrently, other part use down_write() to keep exclusive
> semantics.
>
> Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
> ---
> net/smc/smc_core.c | 55 +++++++++++++++++++++++++++---------------------------
> net/smc/smc_core.h | 4 ++--
> net/smc/smc_llc.c | 16 ++++++++--------
> 3 files changed, 38 insertions(+), 37 deletions(-)
>
> diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
> index 113804d..b90970a 100644
> --- a/net/smc/smc_core.c
> +++ b/net/smc/smc_core.c
> @@ -1138,8 +1138,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
> lgr->freeing = 0;
> lgr->vlan_id = ini->vlan_id;
> refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
> - mutex_init(&lgr->sndbufs_lock);
> - mutex_init(&lgr->rmbs_lock);
> + init_rwsem(&lgr->sndbufs_lock);
> + init_rwsem(&lgr->rmbs_lock);
> rwlock_init(&lgr->conns_lock);
> for (i = 0; i < SMC_RMBE_SIZES; i++) {
> INIT_LIST_HEAD(&lgr->sndbufs[i]);
> @@ -1380,7 +1380,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
> static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
> struct smc_link_group *lgr)
> {
> - struct mutex *lock; /* lock buffer list */
> + struct rw_semaphore *lock; /* lock buffer list */
> int rc;
>
> if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
> @@ -1400,9 +1400,9 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
> /* buf registration failed, reuse not possible */
> lock = is_rmb ? &lgr->rmbs_lock :
> &lgr->sndbufs_lock;
> - mutex_lock(lock);
> + down_write(lock);
> list_del(&buf_desc->list);
> - mutex_unlock(lock);
> + up_write(lock);
>
> smc_buf_free(lgr, is_rmb, buf_desc);
> } else {
> @@ -1506,15 +1506,16 @@ static void smcr_buf_unmap_lgr(struct smc_link *lnk)
> int i;
>
> for (i = 0; i < SMC_RMBE_SIZES; i++) {
> - mutex_lock(&lgr->rmbs_lock);
> + down_write(&lgr->rmbs_lock);
> list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
> smcr_buf_unmap_link(buf_desc, true, lnk);
> - mutex_unlock(&lgr->rmbs_lock);
> - mutex_lock(&lgr->sndbufs_lock);
> + up_write(&lgr->rmbs_lock);
> +
> + down_write(&lgr->sndbufs_lock);
> list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
> list)
> smcr_buf_unmap_link(buf_desc, false, lnk);
> - mutex_unlock(&lgr->sndbufs_lock);
> + up_write(&lgr->sndbufs_lock);
> }
> }
>
> @@ -2324,19 +2325,19 @@ int smc_uncompress_bufsize(u8 compressed)
> * buffer size; if not available, return NULL
> */
> static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
> - struct mutex *lock,
> + struct rw_semaphore *lock,
> struct list_head *buf_list)
> {
> struct smc_buf_desc *buf_slot;
>
> - mutex_lock(lock);
> + down_read(lock);
> list_for_each_entry(buf_slot, buf_list, list) {
> if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
> - mutex_unlock(lock);
> + up_read(lock);
> return buf_slot;
> }
> }
> - mutex_unlock(lock);
> + up_read(lock);
> return NULL;
> }
>
> @@ -2445,13 +2446,13 @@ int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
> return 0;
> }
>
> -static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
> +static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
> struct list_head *lst, bool is_rmb)
> {
> struct smc_buf_desc *buf_desc, *bf;
> int rc = 0;
>
> - mutex_lock(lock);
> + down_write(lock);
> list_for_each_entry_safe(buf_desc, bf, lst, list) {
> if (!buf_desc->used)
> continue;
> @@ -2460,7 +2461,7 @@ static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
> goto out;
> }
> out:
> - mutex_unlock(lock);
> + up_write(lock);
> return rc;
> }
>
> @@ -2493,37 +2494,37 @@ int smcr_buf_reg_lgr(struct smc_link *lnk)
> int i, rc = 0;
>
> /* reg all RMBs for a new link */
> - mutex_lock(&lgr->rmbs_lock);
> + down_write(&lgr->rmbs_lock);
> for (i = 0; i < SMC_RMBE_SIZES; i++) {
> list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
> if (!buf_desc->used)
> continue;
> rc = smcr_link_reg_buf(lnk, buf_desc);
> if (rc) {
> - mutex_unlock(&lgr->rmbs_lock);
> + up_write(&lgr->rmbs_lock);
> return rc;
> }
> }
> }
> - mutex_unlock(&lgr->rmbs_lock);
> + up_write(&lgr->rmbs_lock);
>
> if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
> return rc;
>
> /* reg all vzalloced sndbufs for a new link */
> - mutex_lock(&lgr->sndbufs_lock);
> + down_write(&lgr->sndbufs_lock);
> for (i = 0; i < SMC_RMBE_SIZES; i++) {
> list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
> if (!buf_desc->used || !buf_desc->is_vm)
> continue;
> rc = smcr_link_reg_buf(lnk, buf_desc);
> if (rc) {
> - mutex_unlock(&lgr->sndbufs_lock);
> + up_write(&lgr->sndbufs_lock);
> return rc;
> }
> }
> }
> - mutex_unlock(&lgr->sndbufs_lock);
> + up_write(&lgr->sndbufs_lock);
> return rc;
> }
>
> @@ -2641,7 +2642,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
> struct list_head *buf_list;
> int bufsize, bufsize_short;
> bool is_dgraded = false;
> - struct mutex *lock; /* lock buffer list */
> + struct rw_semaphore *lock; /* lock buffer list */
> int sk_buf_size;
>
> if (is_rmb)
> @@ -2689,9 +2690,9 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
> SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
> SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
> buf_desc->used = 1;
> - mutex_lock(lock);
> + down_write(lock);
> list_add(&buf_desc->list, buf_list);
> - mutex_unlock(lock);
> + up_write(lock);
> break; /* found */
> }
>
> @@ -2765,9 +2766,9 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
> /* create rmb */
> rc = __smc_buf_create(smc, is_smcd, true);
> if (rc) {
> - mutex_lock(&smc->conn.lgr->sndbufs_lock);
> + down_write(&smc->conn.lgr->sndbufs_lock);
> list_del(&smc->conn.sndbuf_desc->list);
> - mutex_unlock(&smc->conn.lgr->sndbufs_lock);
> + up_write(&smc->conn.lgr->sndbufs_lock);
> smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
> smc->conn.sndbuf_desc = NULL;
> }
> diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
> index 559d330..008148c 100644
> --- a/net/smc/smc_core.h
> +++ b/net/smc/smc_core.h
> @@ -300,9 +300,9 @@ struct smc_link_group {
> unsigned short vlan_id; /* vlan id of link group */
>
> struct list_head sndbufs[SMC_RMBE_SIZES];/* tx buffers */
> - struct mutex sndbufs_lock; /* protects tx buffers */
> + struct rw_semaphore sndbufs_lock; /* protects tx buffers */
> struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */
> - struct mutex rmbs_lock; /* protects rx buffers */
> + struct rw_semaphore rmbs_lock; /* protects rx buffers */
>
> u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
> struct delayed_work free_work; /* delayed freeing of an lgr */
> diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
> index d744937..76f9906 100644
> --- a/net/smc/smc_llc.c
> +++ b/net/smc/smc_llc.c
> @@ -642,7 +642,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
>
> prim_lnk_idx = link->link_idx;
> lnk_idx = link_new->link_idx;
> - mutex_lock(&lgr->rmbs_lock);
> + down_write(&lgr->rmbs_lock);
> ext->num_rkeys = lgr->conns_num;
> if (!ext->num_rkeys)
> goto out;
> @@ -662,7 +662,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
> }
> len += i * sizeof(ext->rt[0]);
> out:
> - mutex_unlock(&lgr->rmbs_lock);
> + up_write(&lgr->rmbs_lock);
> return len;
> }
>
> @@ -923,7 +923,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
> int rc = 0;
> int i;
>
> - mutex_lock(&lgr->rmbs_lock);
> + down_write(&lgr->rmbs_lock);
> num_rkeys_send = lgr->conns_num;
> buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
> do {
> @@ -950,7 +950,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
> break;
> } while (num_rkeys_send || num_rkeys_recv);
>
> - mutex_unlock(&lgr->rmbs_lock);
> + up_write(&lgr->rmbs_lock);
> return rc;
> }
>
> @@ -1033,14 +1033,14 @@ static void smc_llc_save_add_link_rkeys(struct smc_link *link,
> ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
> SMC_WR_TX_SIZE);
> max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
> - mutex_lock(&lgr->rmbs_lock);
> + down_write(&lgr->rmbs_lock);
> for (i = 0; i < max; i++) {
> smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
> ext->rt[i].rmb_key,
> ext->rt[i].rmb_vaddr_new,
> ext->rt[i].rmb_key_new);
> }
> - mutex_unlock(&lgr->rmbs_lock);
> + up_write(&lgr->rmbs_lock);
> }
>
> static void smc_llc_save_add_link_info(struct smc_link *link,
> @@ -1349,7 +1349,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
> int rc = 0;
> int i;
>
> - mutex_lock(&lgr->rmbs_lock);
> + down_write(&lgr->rmbs_lock);
> num_rkeys_send = lgr->conns_num;
> buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
> do {
> @@ -1374,7 +1374,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
> smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
> } while (num_rkeys_send || num_rkeys_recv);
> out:
> - mutex_unlock(&lgr->rmbs_lock);
> + up_write(&lgr->rmbs_lock);
> return rc;
> }
>
> --
> 1.8.3.1
next prev parent reply other threads:[~2022-08-16 9:56 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-08-10 17:47 [PATCH net-next 00/10] net/smc: optimize the parallelism of SMC-R connections D. Wythe
2022-08-10 17:47 ` [PATCH net-next 01/10] net/smc: remove locks smc_client_lgr_pending and smc_server_lgr_pending D. Wythe
2022-08-11 3:41 ` kernel test robot
2022-08-11 11:51 ` kernel test robot
2022-08-16 9:43 ` Jan Karcher
2022-08-16 12:47 ` Tony Lu
2022-08-16 12:52 ` Tony Lu
2022-08-10 17:47 ` [PATCH net-next 02/10] net/smc: fix SMC_CLC_DECL_ERR_REGRMB without smc_server_lgr_pending D. Wythe
2022-08-16 7:58 ` Tony Lu
2022-08-10 17:47 ` [PATCH net-next 03/10] net/smc: allow confirm/delete rkey response deliver multiplex D. Wythe
2022-08-16 8:17 ` Tony Lu
2022-08-10 17:47 ` [PATCH net-next 04/10] net/smc: make SMC_LLC_FLOW_RKEY run concurrently D. Wythe
2022-08-10 17:47 ` [PATCH net-next 05/10] net/smc: llc_conf_mutex refactor, replace it with rw_semaphore D. Wythe
2022-08-10 17:47 ` [PATCH net-next 06/10] net/smc: use read semaphores to reduce unnecessary blocking in smc_buf_create() & smcr_buf_unuse() D. Wythe
2022-08-10 17:47 ` [PATCH net-next 07/10] net/smc: reduce unnecessary blocking in smcr_lgr_reg_rmbs() D. Wythe
2022-08-16 8:24 ` Tony Lu
2022-08-10 17:47 ` [PATCH net-next 08/10] net/smc: replace mutex rmbs_lock and sndbufs_lock with rw_semaphore D. Wythe
2022-08-16 8:37 ` Tony Lu [this message]
2022-08-10 17:47 ` [PATCH net-next 09/10] net/smc: fix potential panic dues to unprotected smc_llc_srv_add_link() D. Wythe
2022-08-16 8:28 ` Tony Lu
2022-08-10 17:47 ` [PATCH net-next 10/10] net/smc: fix application data exception D. Wythe
2022-08-11 3:28 ` [PATCH net-next 00/10] net/smc: optimize the parallelism of SMC-R connections Jakub Kicinski
2022-08-11 5:13 ` Tony Lu
2022-08-11 12:31 ` Karsten Graul
2022-08-16 9:35 ` Jan Karcher
2022-08-16 12:40 ` Tony Lu
2022-08-17 4:55 ` D. Wythe
2022-08-17 16:52 ` Jan Karcher
2022-08-18 13:06 ` D. Wythe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=YvtXWfnWTP5P9ePT@TonyMac-Alibaba \
--to=tonylu@linux.alibaba.com \
--cc=alibuda@linux.alibaba.com \
--cc=davem@davemloft.net \
--cc=kgraul@linux.ibm.com \
--cc=kuba@kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=wenjia@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).