public inbox for linux-nfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Anna Schumaker <schumaker.anna@gmail.com>
To: Chuck Lever <chuck.lever@oracle.com>,
	linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Subject: Re: [PATCH v1 10/11] xprtrdma: Extract sockaddr from struct rdma_cm_id
Date: Mon, 24 Feb 2020 11:15:09 -0500	[thread overview]
Message-ID: <57e16538f7a711d7671056d19abc38a09afc451d.camel@gmail.com> (raw)
In-Reply-To: <20200221220100.2072.45609.stgit@manet.1015granger.net>

Hi Chuck,

On Fri, 2020-02-21 at 17:01 -0500, Chuck Lever wrote:
> rpcrdma_cm_event_handler() is always passed an @id pointer that is
> valid. However, in a subsequent patch, we won't be able to extract
> an r_xprt in every case. So instead of using the r_xprt's
> presentation address strings, extract them from struct rdma_cm_id.
> 
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
>  include/trace/events/rpcrdma.h |   78 +++++++++++++++++++++++++++----------
> ---
>  net/sunrpc/xprtrdma/verbs.c    |   33 +++++++----------
>  2 files changed, 67 insertions(+), 44 deletions(-)
> 
> diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
> index bebc45f7c570..a6d3a2122e9b 100644
> --- a/include/trace/events/rpcrdma.h
> +++ b/include/trace/events/rpcrdma.h
> @@ -373,47 +373,74 @@
>  
>  TRACE_EVENT(xprtrdma_inline_thresh,
>  	TP_PROTO(
> -		const struct rpcrdma_xprt *r_xprt
> +		const struct rpcrdma_ep *ep
>  	),
>  
> -	TP_ARGS(r_xprt),
> +	TP_ARGS(ep),
>  
>  	TP_STRUCT__entry(
> -		__field(const void *, r_xprt)
>  		__field(unsigned int, inline_send)
>  		__field(unsigned int, inline_recv)
>  		__field(unsigned int, max_send)
>  		__field(unsigned int, max_recv)
> -		__string(addr, rpcrdma_addrstr(r_xprt))
> -		__string(port, rpcrdma_portstr(r_xprt))
> +		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
> +		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
>  	),
>  
>  	TP_fast_assign(
> -		const struct rpcrdma_ep *ep = &r_xprt->rx_ep;
> +		const struct rdma_cm_id *id = ep->re_id;
>  
> -		__entry->r_xprt = r_xprt;
>  		__entry->inline_send = ep->re_inline_send;
>  		__entry->inline_recv = ep->re_inline_recv;
>  		__entry->max_send = ep->re_max_inline_send;
>  		__entry->max_recv = ep->re_max_inline_recv;
> -		__assign_str(addr, rpcrdma_addrstr(r_xprt));
> -		__assign_str(port, rpcrdma_portstr(r_xprt));
> +		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
> +		       sizeof(struct sockaddr_in6));
> +		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
> +		       sizeof(struct sockaddr_in6));
>  	),
>  
> -	TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc
> send/recv=%u/%u",
> -		__get_str(addr), __get_str(port), __entry->r_xprt,
> +	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
> +		__entry->srcaddr, __entry->dstaddr,
>  		__entry->inline_send, __entry->inline_recv,
>  		__entry->max_send, __entry->max_recv
>  	)
>  );
>  
> +TRACE_EVENT(xprtrdma_remove,
> +	TP_PROTO(
> +		const struct rpcrdma_ep *ep
> +	),
> +
> +	TP_ARGS(ep),
> +
> +	TP_STRUCT__entry(
> +		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
> +		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
> +		__string(name, ep->re_id->device->name)
> +	),
> +
> +	TP_fast_assign(
> +		const struct rdma_cm_id *id = ep->re_id;
> +
> +		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
> +		       sizeof(struct sockaddr_in6));
> +		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
> +		       sizeof(struct sockaddr_in6));
> +		__assign_str(name, id->device->name);
> +	),
> +
> +	TP_printk("%pISpc -> %pISpc device=%s",
> +		__entry->srcaddr, __entry->dstaddr, __get_str(name)
> +	)
> +);
> +
>  DEFINE_CONN_EVENT(connect);
>  DEFINE_CONN_EVENT(disconnect);
>  DEFINE_CONN_EVENT(flush_dct);
>  
>  DEFINE_RXPRT_EVENT(xprtrdma_create);
>  DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
> -DEFINE_RXPRT_EVENT(xprtrdma_remove);
>  DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
>  DEFINE_RXPRT_EVENT(xprtrdma_op_close);
>  DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
> @@ -480,32 +507,33 @@
>  
>  TRACE_EVENT(xprtrdma_qp_event,
>  	TP_PROTO(
> -		const struct rpcrdma_xprt *r_xprt,
> +		const struct rpcrdma_ep *ep,
>  		const struct ib_event *event
>  	),
>  
> -	TP_ARGS(r_xprt, event),
> +	TP_ARGS(ep, event),
>  
>  	TP_STRUCT__entry(
> -		__field(const void *, r_xprt)
> -		__field(unsigned int, event)
> +		__field(unsigned long, event)
>  		__string(name, event->device->name)
> -		__string(addr, rpcrdma_addrstr(r_xprt))
> -		__string(port, rpcrdma_portstr(r_xprt))
> +		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
> +		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
>  	),
>  
>  	TP_fast_assign(
> -		__entry->r_xprt = r_xprt;
> +		const struct rdma_cm_id *id = ep->re_id;
> +
>  		__entry->event = event->event;
>  		__assign_str(name, event->device->name);
> -		__assign_str(addr, rpcrdma_addrstr(r_xprt));
> -		__assign_str(port, rpcrdma_portstr(r_xprt));
> +		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
> +		       sizeof(struct sockaddr_in6));
> +		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
> +		       sizeof(struct sockaddr_in6));
>  	),
>  
> -	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
> -		__get_str(addr), __get_str(port), __entry->r_xprt,
> -		__get_str(name), rdma_show_ib_event(__entry->event),
> -		__entry->event
> +	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
> +		__entry->srcaddr, __entry->dstaddr, __get_str(name),
> +		rdma_show_ib_event(__entry->event), __entry->event
>  	)
>  );
>  
> diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
> index 10826982ddf8..5cb308fb4f0f 100644
> --- a/net/sunrpc/xprtrdma/verbs.c
> +++ b/net/sunrpc/xprtrdma/verbs.c
> @@ -116,16 +116,14 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt
> *r_xprt)
>   * @context: ep that owns QP where event occurred
>   *
>   * Called from the RDMA provider (device driver) possibly in an interrupt
> - * context.
> + * context. The QP is always destroyed before the ID, so the ID will be
> + * reliably available when this handler is invoked.
>   */
> -static void
> -rpcrdma_qp_event_handler(struct ib_event *event, void *context)
> +static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
>  {
>  	struct rpcrdma_ep *ep = context;
> -	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
> -						   rx_ep);
>  
> -	trace_xprtrdma_qp_event(r_xprt, event);
> +	trace_xprtrdma_qp_event(ep, event);
>  }
>  
>  /**
> @@ -202,11 +200,10 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct
> ib_wc *wc)
>  	rpcrdma_rep_destroy(rep);
>  }
>  
> -static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
> +static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
>  				      struct rdma_conn_param *param)
>  {
>  	const struct rpcrdma_connect_private *pmsg = param->private_data;
> -	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
>  	unsigned int rsize, wsize;
>  
>  	/* Default settings for RPC-over-RDMA Version One */
> @@ -241,6 +238,7 @@ static void rpcrdma_update_cm_private(struct rpcrdma_xprt
> *r_xprt,
>  static int
>  rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
>  {
> +	struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;

Is there a clean way to put this inside the CONFIG_SUNRPC_DEBUG lines below?
I'm getting an "unused variable 'sap'" warning when CONFIG_SUNRPC_DEBUG=n.

Thanks,
Anna

>  	struct rpcrdma_xprt *r_xprt = id->context;
>  	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
>  	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
> @@ -264,23 +262,22 @@ static void rpcrdma_update_cm_private(struct
> rpcrdma_xprt *r_xprt,
>  		return 0;
>  	case RDMA_CM_EVENT_DEVICE_REMOVAL:
>  #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
> -		pr_info("rpcrdma: removing device %s for %s:%s\n",
> -			ep->re_id->device->name,
> -			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
> +		pr_info("rpcrdma: removing device %s for %pISpc\n",
> +			ep->re_id->device->name, sap);
>  #endif
>  		init_completion(&ep->re_remove_done);
>  		ep->re_connect_status = -ENODEV;
>  		xprt_force_disconnect(xprt);
>  		wait_for_completion(&ep->re_remove_done);
> -		trace_xprtrdma_remove(r_xprt);
> +		trace_xprtrdma_remove(ep);
>  
>  		/* Return 1 to ensure the core destroys the id. */
>  		return 1;
>  	case RDMA_CM_EVENT_ESTABLISHED:
>  		++xprt->connect_cookie;
>  		ep->re_connect_status = 1;
> -		rpcrdma_update_cm_private(r_xprt, &event->param.conn);
> -		trace_xprtrdma_inline_thresh(r_xprt);
> +		rpcrdma_update_cm_private(ep, &event->param.conn);
> +		trace_xprtrdma_inline_thresh(ep);
>  		wake_up_all(&ep->re_connect_wait);
>  		break;
>  	case RDMA_CM_EVENT_CONNECT_ERROR:
> @@ -290,9 +287,8 @@ static void rpcrdma_update_cm_private(struct rpcrdma_xprt
> *r_xprt,
>  		ep->re_connect_status = -ENETUNREACH;
>  		goto disconnected;
>  	case RDMA_CM_EVENT_REJECTED:
> -		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
> -			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
> -			rdma_reject_msg(id, event->status));
> +		dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
> +			sap, rdma_reject_msg(id, event->status));
>  		ep->re_connect_status = -ECONNREFUSED;
>  		if (event->status == IB_CM_REJ_STALE_CONN)
>  			ep->re_connect_status = -EAGAIN;
> @@ -307,8 +303,7 @@ static void rpcrdma_update_cm_private(struct rpcrdma_xprt
> *r_xprt,
>  		break;
>  	}
>  
> -	dprintk("RPC:       %s: %s:%s on %s/frwr: %s\n", __func__,
> -		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
> +	dprintk("RPC:       %s: %pISpc on %s/frwr: %s\n", __func__, sap,
>  		ep->re_id->device->name, rdma_event_msg(event->event));
>  	return 0;
>  }
> 


  reply	other threads:[~2020-02-24 16:15 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-02-21 22:00 [PATCH v1 00/11] NFS/RDMA client side connection overhaul Chuck Lever
2020-02-21 22:00 ` [PATCH v1 01/11] xprtrdma: Invoke rpcrdma_ep_create() in the connect worker Chuck Lever
2020-02-21 22:00 ` [PATCH v1 02/11] xprtrdma: Refactor frwr_init_mr() Chuck Lever
2020-02-21 22:00 ` [PATCH v1 03/11] xprtrdma: Clean up the post_send path Chuck Lever
2020-02-21 22:00 ` [PATCH v1 04/11] xprtrdma: Refactor rpcrdma_ep_connect() and rpcrdma_ep_disconnect() Chuck Lever
2020-02-21 22:00 ` [PATCH v1 05/11] xprtrdma: Allocate Protection Domain in rpcrdma_ep_create() Chuck Lever
2020-03-01 18:11   ` Tom Talpey
2020-03-01 18:29     ` Chuck Lever
2020-03-01 18:38       ` Tom Talpey
2020-02-21 22:00 ` [PATCH v1 06/11] xprtrdma: Invoke rpcrdma_ia_open in the connect worker Chuck Lever
2020-02-21 22:00 ` [PATCH v1 07/11] xprtrdma: Remove rpcrdma_ia::ri_flags Chuck Lever
2020-02-21 22:00 ` [PATCH v1 08/11] xprtrdma: Disconnect on flushed completion Chuck Lever
2020-02-21 22:00 ` [PATCH v1 09/11] xprtrdma: Merge struct rpcrdma_ia into struct rpcrdma_ep Chuck Lever
2020-02-21 22:01 ` [PATCH v1 10/11] xprtrdma: Extract sockaddr from struct rdma_cm_id Chuck Lever
2020-02-24 16:15   ` Anna Schumaker [this message]
2020-02-24 16:18     ` Chuck Lever
2020-02-24 16:23       ` Anna Schumaker
2020-02-21 22:01 ` [PATCH v1 11/11] xprtrdma: kmalloc rpcrdma_ep separate from rpcrdma_xprt Chuck Lever
2020-03-01 18:09 ` [PATCH v1 00/11] NFS/RDMA client side connection overhaul Tom Talpey
2020-03-01 18:12   ` Chuck Lever
     [not found] ` <AA5039EB-DDA0-44CA-B382-61BD544A330A@gmail.com>
2020-03-11 17:16   ` Schumaker, Anna

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=57e16538f7a711d7671056d19abc38a09afc451d.camel@gmail.com \
    --to=schumaker.anna@gmail.com \
    --cc=chuck.lever@oracle.com \
    --cc=linux-nfs@vger.kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox