* [bug report] RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter
@ 2022-10-10 10:55 Dan Carpenter
  2022-10-10 19:38 ` Long Li
From: Dan Carpenter @ 2022-10-10 10:55 UTC (permalink / raw)
  To: longli; +Cc: linux-hyperv

Hello Long Li,

The patch 6dce3468a04c: "RDMA/mana_ib: Add a driver for Microsoft
Azure Network Adapter" from Sep 20, 2022, leads to the following
Smatch static checker warning:

	drivers/infiniband/hw/mana/qp.c:240 mana_ib_create_qp_rss()
	warn: 'mana_ind_table' was already freed.

drivers/infiniband/hw/mana/qp.c
    91 static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
    92                                  struct ib_qp_init_attr *attr,
    93                                  struct ib_udata *udata)
    94 {
    95         struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
    96         struct mana_ib_dev *mdev =
    97                 container_of(pd->device, struct mana_ib_dev, ib_dev);
    98         struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
    99         struct mana_ib_create_qp_rss_resp resp = {};
    100         struct mana_ib_create_qp_rss ucmd = {};
    101         struct gdma_dev *gd = mdev->gdma_dev;
    102         mana_handle_t *mana_ind_table;
    103         struct mana_port_context *mpc;
    104         struct mana_context *mc;
    105         struct net_device *ndev;
    106         struct mana_ib_cq *cq;
    107         struct mana_ib_wq *wq;
    108         struct ib_cq *ibcq;
    109         struct ib_wq *ibwq;
    110         int i = 0, ret;
    111         u32 port;
    112 
    113         mc = gd->driver_data;
    114 
    115         if (udata->inlen < sizeof(ucmd))
    116                 return -EINVAL;
    117 
    118         ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
    119         if (ret) {
    120                 ibdev_dbg(&mdev->ib_dev,
    121                           "Failed copy from udata for create rss-qp, err %d\n",
    122                           ret);
    123                 return -EFAULT;
    124         }
    125 
    126         if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
    127                 ibdev_dbg(&mdev->ib_dev,
    128                           "Requested max_recv_wr %d exceeding limit.\n",
    129                           attr->cap.max_recv_wr);
    130                 return -EINVAL;
    131         }
    132 
    133         if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
    134                 ibdev_dbg(&mdev->ib_dev,
    135                           "Requested max_recv_sge %d exceeding limit.\n",
    136                           attr->cap.max_recv_sge);
    137                 return -EINVAL;
    138         }
    139 
    140         if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
    141                 ibdev_dbg(&mdev->ib_dev,
    142                           "RX Hash function is not supported, %d\n",
    143                           ucmd.rx_hash_function);
    144                 return -EINVAL;
    145         }
    146 
    147         /* IB ports start with 1, MANA start with 0 */
    148         port = ucmd.port;
    149         if (port < 1 || port > mc->num_ports) {
    150                 ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
    151                           port);
    152                 return -EINVAL;
    153         }
    154         ndev = mc->ports[port - 1];
    155         mpc = netdev_priv(ndev);
    156 
    157         ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
    158                   ucmd.rx_hash_function, port);
    159 
    160         mana_ind_table = kzalloc(sizeof(mana_handle_t) *
    161                                          (1 << ind_tbl->log_ind_tbl_size),
    162                                  GFP_KERNEL);
    163         if (!mana_ind_table) {
    164                 ret = -ENOMEM;
    165                 goto fail;
    166         }
    167 
    168         qp->port = port;
    169 
    170         for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
    171                 struct mana_obj_spec wq_spec = {};
    172                 struct mana_obj_spec cq_spec = {};
    173 
    174                 ibwq = ind_tbl->ind_tbl[i];
    175                 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
    176 
    177                 ibcq = ibwq->cq;
    178                 cq = container_of(ibcq, struct mana_ib_cq, ibcq);
    179 
    180                 wq_spec.gdma_region = wq->gdma_region;
    181                 wq_spec.queue_size = wq->wq_buf_size;
    182 
    183                 cq_spec.gdma_region = cq->gdma_region;
    184                 cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
    185                 cq_spec.modr_ctx_id = 0;
    186                 cq_spec.attached_eq = GDMA_CQ_NO_EQ;
    187 
    188                 ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
    189                                          &wq_spec, &cq_spec, &wq->rx_object);
    190                 if (ret)
    191                         goto fail;
    192 
    193                 /* The GDMA regions are now owned by the WQ object */
    194                 wq->gdma_region = GDMA_INVALID_DMA_REGION;
    195                 cq->gdma_region = GDMA_INVALID_DMA_REGION;
    196 
    197                 wq->id = wq_spec.queue_index;
    198                 cq->id = cq_spec.queue_index;
    199 
    200                 ibdev_dbg(&mdev->ib_dev,
    201                           "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
    202                           ret, wq->rx_object, wq->id, cq->id);
    203 
    204                 resp.entries[i].cqid = cq->id;
    205                 resp.entries[i].wqid = wq->id;
    206 
    207                 mana_ind_table[i] = wq->rx_object;
    208         }
    209         resp.num_entries = i;
    210 
    211         ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
    212                                          mana_ind_table,
    213                                          ind_tbl->log_ind_tbl_size,
    214                                          ucmd.rx_hash_key_len,
    215                                          ucmd.rx_hash_key);
    216         if (ret)
    217                 goto fail;
    218 
    219         kfree(mana_ind_table);

Freed here.

    220 
    221         if (udata) {
    222                 ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
    223                 if (ret) {
    224                         ibdev_dbg(&mdev->ib_dev,
    225                                   "Failed to copy to udata create rss-qp, %d\n",
    226                                   ret);
    227                         goto fail;

Goto.

    228                 }
    229         }
    230 
    231         return 0;
    232 
    233 fail:
    234         while (i-- > 0) {
    235                 ibwq = ind_tbl->ind_tbl[i];
    236                 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
    237                 mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
    238         }
    239 
--> 240         kfree(mana_ind_table);

Double freed.

    241 
    242         return ret;
    243 }
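
A minimal, untested sketch of one possible way to break the double free (assuming nothing else needs the table after mana_ib_cfg_vport_steering()): clear the pointer after the success-path kfree() so the kfree() reached via "goto fail" becomes a harmless kfree(NULL). This is only an illustration of the report, not the fix that was actually applied upstream:

	/* sketch: NULL the pointer after the success-path free so a later
	 * "goto fail" cannot free the same allocation twice
	 */
	kfree(mana_ind_table);
	mana_ind_table = NULL;

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret) {
			ibdev_dbg(&mdev->ib_dev,
				  "Failed to copy to udata create rss-qp, %d\n",
				  ret);
			goto fail;	/* now safe: fail path frees NULL */
		}
	}

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);	/* NULL on the copy_to_udata failure path, so no double free */

	return ret;

An alternative sketch would be to drop the early kfree() entirely and free the table once on a common exit path.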

regards,
dan carpenter

* [bug report] RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter
@ 2022-10-12 11:55 Dan Carpenter
  2022-10-12 19:28 ` Long Li
From: Dan Carpenter @ 2022-10-12 11:55 UTC (permalink / raw)
  To: longli; +Cc: linux-hyperv

Hello Long Li,

This is a semi-automatic email about new static checker warnings.

The patch 6dce3468a04c: "RDMA/mana_ib: Add a driver for Microsoft
Azure Network Adapter" from Sep 20, 2022, leads to the following
Smatch complaint:

    drivers/infiniband/hw/mana/qp.c:221 mana_ib_create_qp_rss()
    warn: variable dereferenced before check 'udata' (see line 115)

drivers/infiniband/hw/mana/qp.c
   114	
   115		if (udata->inlen < sizeof(ucmd))
                    ^^^^^^^^^^^^
This code assumes "udata" is non-NULL

   116			return -EINVAL;
   117	
   118		ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
   119		if (ret) {
   120			ibdev_dbg(&mdev->ib_dev,
   121				  "Failed copy from udata for create rss-qp, err %d\n",
   122				  ret);
   123			return -EFAULT;
   124		}
   125	
   126		if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
   127			ibdev_dbg(&mdev->ib_dev,
   128				  "Requested max_recv_wr %d exceeding limit.\n",
   129				  attr->cap.max_recv_wr);
   130			return -EINVAL;
   131		}
   132	
   133		if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
   134			ibdev_dbg(&mdev->ib_dev,
   135				  "Requested max_recv_sge %d exceeding limit.\n",
   136				  attr->cap.max_recv_sge);
   137			return -EINVAL;
   138		}
   139	
   140		if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
   141			ibdev_dbg(&mdev->ib_dev,
   142				  "RX Hash function is not supported, %d\n",
   143				  ucmd.rx_hash_function);
   144			return -EINVAL;
   145		}
   146	
   147		/* IB ports start with 1, MANA start with 0 */
   148		port = ucmd.port;
   149		if (port < 1 || port > mc->num_ports) {
   150			ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
   151				  port);
   152			return -EINVAL;
   153		}
   154		ndev = mc->ports[port - 1];
   155		mpc = netdev_priv(ndev);
   156	
   157		ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
   158			  ucmd.rx_hash_function, port);
   159	
   160		mana_ind_table = kzalloc(sizeof(mana_handle_t) *
   161						 (1 << ind_tbl->log_ind_tbl_size),
   162					 GFP_KERNEL);
   163		if (!mana_ind_table) {
   164			ret = -ENOMEM;
   165			goto fail;
   166		}
   167	
   168		qp->port = port;
   169	
   170		for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
   171			struct mana_obj_spec wq_spec = {};
   172			struct mana_obj_spec cq_spec = {};
   173	
   174			ibwq = ind_tbl->ind_tbl[i];
   175			wq = container_of(ibwq, struct mana_ib_wq, ibwq);
   176	
   177			ibcq = ibwq->cq;
   178			cq = container_of(ibcq, struct mana_ib_cq, ibcq);
   179	
   180			wq_spec.gdma_region = wq->gdma_region;
   181			wq_spec.queue_size = wq->wq_buf_size;
   182	
   183			cq_spec.gdma_region = cq->gdma_region;
   184			cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
   185			cq_spec.modr_ctx_id = 0;
   186			cq_spec.attached_eq = GDMA_CQ_NO_EQ;
   187	
   188			ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
   189						 &wq_spec, &cq_spec, &wq->rx_object);
   190			if (ret)
   191				goto fail;
   192	
   193			/* The GDMA regions are now owned by the WQ object */
   194			wq->gdma_region = GDMA_INVALID_DMA_REGION;
   195			cq->gdma_region = GDMA_INVALID_DMA_REGION;
   196	
   197			wq->id = wq_spec.queue_index;
   198			cq->id = cq_spec.queue_index;
   199	
   200			ibdev_dbg(&mdev->ib_dev,
   201				  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
   202				  ret, wq->rx_object, wq->id, cq->id);
   203	
   204			resp.entries[i].cqid = cq->id;
   205			resp.entries[i].wqid = wq->id;
   206	
   207			mana_ind_table[i] = wq->rx_object;
   208		}
   209		resp.num_entries = i;
   210	
   211		ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
   212						 mana_ind_table,
   213						 ind_tbl->log_ind_tbl_size,
   214						 ucmd.rx_hash_key_len,
   215						 ucmd.rx_hash_key);
   216		if (ret)
   217			goto fail;
   218	
   219		kfree(mana_ind_table);
   220	
   221		if (udata) {
                    ^^^^^
Can it be NULL?

   222			ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
   223			if (ret) {
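
A hedged sketch of the two obvious resolutions, depending on whether a NULL udata is actually reachable here (in-kernel ib_create_qp() callers pass udata == NULL, user-space verbs callers do not):

	/* Option A (sketch): if kernel-space callers are possible, check
	 * udata before the first dereference instead of after it.
	 */
	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	/* Option B (sketch): if this path is user-space only, the later
	 * "if (udata)" check is dead code and can simply be dropped, e.g.:
	 *
	 *	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	 *	if (ret) {
	 *		...
	 *		goto fail;
	 *	}
	 */

Either way the dereference-before-check inconsistency flagged by Smatch goes away; which option is correct depends on the intended callers of mana_ib_create_qp_rss().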

regards,
dan carpenter
