* Re: [PATCH 1/1] Drivers: hv: vmbus: Support a vmbus API for efficiently sending page arrays
From: Greg KH @ 2014-12-15 19:33 UTC (permalink / raw)
  To: K. Y. Srinivasan; +Cc: linux-kernel, devel, olaf, apw, jasowang

On Mon, Dec 15, 2014 at 12:33:47PM -0800, K. Y. Srinivasan wrote:
> Currently, the API for sending a multi-page buffer over VMBUS is limited to
> a maximum pfn array of MAX_MULTIPAGE_BUFFER_COUNT. This limitation is
> not imposed by the host and unnecessarily limits the maximum payload
> that can be sent. Implement an API that does not have this restriction.
> 
> Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
> ---
>  drivers/hv/channel.c   |   44 ++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/hyperv.h |   31 +++++++++++++++++++++++++++++++
>  2 files changed, 75 insertions(+), 0 deletions(-)
> 
> diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
> index c76ffbe..18c4f23 100644
> --- a/drivers/hv/channel.c
> +++ b/drivers/hv/channel.c
> @@ -686,6 +686,50 @@ EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
>  /*
>   * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
>   * using a GPADL Direct packet type.
> + * The buffer includes the vmbus descriptor.
> + */
> +int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
> +			      struct vmbus_packet_mpb_array *desc,
> +			      u32 desc_size,
> +			      void *buffer, u32 bufferlen, u64 requestid)
> +{
> +	int ret;
> +	u32 packetlen;
> +	u32 packetlen_aligned;
> +	struct kvec bufferlist[3];
> +	u64 aligned_data = 0;
> +	bool signal = false;
> +
> +	packetlen = desc_size + bufferlen;
> +	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
> +
> +	/* Setup the descriptor */
> +	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
> +	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
> +	desc->dataoffset8 = desc_size >> 3; /* in 8-byte granularity */
> +	desc->length8 = (u16)(packetlen_aligned >> 3);
> +	desc->transactionid = requestid;
> +	desc->rangecount = 1;
> +
> +	bufferlist[0].iov_base = desc;
> +	bufferlist[0].iov_len = desc_size;
> +	bufferlist[1].iov_base = buffer;
> +	bufferlist[1].iov_len = bufferlen;
> +	bufferlist[2].iov_base = &aligned_data;
> +	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
> +
> +	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
> +
> +	if (ret == 0 && signal)
> +		vmbus_setevent(channel);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
> +
> +/*
> + * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
> + * using a GPADL Direct packet type.
>   */
>  int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
>  				struct hv_multipage_buffer *multi_pagebuffer,
> diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
> index 08cfaff..8615b0d 100644
> --- a/include/linux/hyperv.h
> +++ b/include/linux/hyperv.h
> @@ -57,6 +57,18 @@ struct hv_multipage_buffer {
>  	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
>  };
>  
> +/*
> + * Multiple-page buffer array; the pfn array is variable size:
> + * The number of entries in the PFN array is determined by
> + * "len" and "offset".
> + */
> +struct hv_mpb_array {
> +	/* Length and offset determine the # of pfns in the array */
> +	u32 len;
> +	u32 offset;
> +	u64 pfn_array[];
> +};

Does this cross the user/kernel boundary?  If so, they need to be __u32
and __u64 variables.

> +
>  /* 0x18 includes the proprietary packet header */
>  #define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
>  					(sizeof(struct hv_page_buffer) * \
> @@ -812,6 +824,18 @@ struct vmbus_channel_packet_multipage_buffer {
>  	struct hv_multipage_buffer range;
>  } __packed;
>  
> +/* The format must be the same as struct vmdata_gpa_direct */
> +struct vmbus_packet_mpb_array {
> +	u16 type;
> +	u16 dataoffset8;
> +	u16 length8;
> +	u16 flags;
> +	u64 transactionid;
> +	u32 reserved;
> +	u32 rangecount;         /* Always 1 in this case */
> +	struct hv_mpb_array range;
> +} __packed;

Same here.

thanks,

greg k-h
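
To illustrate the distinction Greg is raising: structure definitions that are
shared with user space (typically under include/uapi/) are expected to use the
exported __u32/__u64 types, while kernel-internal definitions use the plain
u32/u64 kernel types. A minimal sketch with hypothetical structure names, not
taken from this patch:

/* Hypothetical examples only; these names are not from the patch. */
#include <linux/types.h>

/* Kernel-internal definition: plain kernel types are fine. */
struct example_internal {
	u32 len;
	u64 pfn;
};

/* A definition exported to user space would need the __-prefixed types. */
struct example_uapi {
	__u32 len;
	__u64 pfn;
};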


* RE: [PATCH 1/1] Drivers: hv: vmbus: Support a vmbus API for efficiently sending page arrays
From: KY Srinivasan @ 2014-12-15 19:52 UTC (permalink / raw)
  To: Greg KH
  Cc: linux-kernel@vger.kernel.org, devel@linuxdriverproject.org,
	olaf@aepfle.de, apw@canonical.com, jasowang@redhat.com



> -----Original Message-----
> From: Greg KH [mailto:gregkh@linuxfoundation.org]
> Sent: Monday, December 15, 2014 11:34 AM
> To: KY Srinivasan
> Cc: linux-kernel@vger.kernel.org; devel@linuxdriverproject.org;
> olaf@aepfle.de; apw@canonical.com; jasowang@redhat.com
> Subject: Re: [PATCH 1/1] Drivers: hv: vmbus: Support a vmbus API for
> efficiently sending page arrays
> 
> On Mon, Dec 15, 2014 at 12:33:47PM -0800, K. Y. Srinivasan wrote:
> > Currently, the API for sending a multi-page buffer over VMBUS is limited to
> > a maximum pfn array of MAX_MULTIPAGE_BUFFER_COUNT. This limitation is
> > not imposed by the host and unnecessarily limits the maximum payload
> > that can be sent. Implement an API that does not have this restriction.
> >
> > Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
> > ---
> > diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
> > index 08cfaff..8615b0d 100644
> > --- a/include/linux/hyperv.h
> > +++ b/include/linux/hyperv.h
> > @@ -57,6 +57,18 @@ struct hv_multipage_buffer {
> >  	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
> >  };
> >
> > +/*
> > + * Multiple-page buffer array; the pfn array is variable size:
> > + * The number of entries in the PFN array is determined by
> > + * "len" and "offset".
> > + */
> > +struct hv_mpb_array {
> > +	/* Length and offset determine the # of pfns in the array */
> > +	u32 len;
> > +	u32 offset;
> > +	u64 pfn_array[];
> > +};
> 
> Does this cross the user/kernel boundary?  If so, they need to be __u32 and
> __u64 variables.

This does not cross the user/kernel boundary.
> 
> > +
> >  /* 0x18 includes the proprietary packet header */
> >  #define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
> >  					(sizeof(struct hv_page_buffer) * \
> > @@ -812,6 +824,18 @@ struct vmbus_channel_packet_multipage_buffer {
> >  	struct hv_multipage_buffer range;
> >  } __packed;
> >
> > +/* The format must be the same as struct vmdata_gpa_direct */
> > +struct vmbus_packet_mpb_array {
> > +	u16 type;
> > +	u16 dataoffset8;
> > +	u16 length8;
> > +	u16 flags;
> > +	u64 transactionid;
> > +	u32 reserved;
> > +	u32 rangecount;         /* Always 1 in this case */
> > +	struct hv_mpb_array range;
> > +} __packed;
> 
> Same here.
This is completely internal to the kernel.

Regards,

K. Y
> 
> thanks,
> 
> greg k-h


* [PATCH 1/1] Drivers: hv: vmbus: Support a vmbus API for efficiently sending page arrays
From: K. Y. Srinivasan @ 2014-12-15 20:33 UTC (permalink / raw)
  To: gregkh, linux-kernel, devel, olaf, apw, jasowang; +Cc: K. Y. Srinivasan

Currently, the API for sending a multi-page buffer over VMBUS is limited to
a maximum pfn array of MAX_MULTIPAGE_BUFFER_COUNT. This limitation is
not imposed by the host and unnecessarily limits the maximum payload
that can be sent. Implement an API that does not have this restriction.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
---
 drivers/hv/channel.c   |   44 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/hyperv.h |   31 +++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 0 deletions(-)

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index c76ffbe..18c4f23 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -686,6 +686,50 @@ EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
 /*
  * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
  * using a GPADL Direct packet type.
+ * The buffer includes the vmbus descriptor.
+ */
+int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+			      struct vmbus_packet_mpb_array *desc,
+			      u32 desc_size,
+			      void *buffer, u32 bufferlen, u64 requestid)
+{
+	int ret;
+	u32 packetlen;
+	u32 packetlen_aligned;
+	struct kvec bufferlist[3];
+	u64 aligned_data = 0;
+	bool signal = false;
+
+	packetlen = desc_size + bufferlen;
+	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
+
+	/* Setup the descriptor */
+	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
+	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
+	desc->dataoffset8 = desc_size >> 3; /* in 8-byte granularity */
+	desc->length8 = (u16)(packetlen_aligned >> 3);
+	desc->transactionid = requestid;
+	desc->rangecount = 1;
+
+	bufferlist[0].iov_base = desc;
+	bufferlist[0].iov_len = desc_size;
+	bufferlist[1].iov_base = buffer;
+	bufferlist[1].iov_len = bufferlen;
+	bufferlist[2].iov_base = &aligned_data;
+	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
+
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+
+	if (ret == 0 && signal)
+		vmbus_setevent(channel);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
+
+/*
+ * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * using a GPADL Direct packet type.
  */
 int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 				struct hv_multipage_buffer *multi_pagebuffer,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 08cfaff..8615b0d 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -57,6 +57,18 @@ struct hv_multipage_buffer {
 	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
 };
 
+/*
+ * Multiple-page buffer array; the pfn array is variable size:
+ * The number of entries in the PFN array is determined by
+ * "len" and "offset".
+ */
+struct hv_mpb_array {
+	/* Length and offset determine the # of pfns in the array */
+	u32 len;
+	u32 offset;
+	u64 pfn_array[];
+};
+
 /* 0x18 includes the proprietary packet header */
 #define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
 					(sizeof(struct hv_page_buffer) * \
@@ -812,6 +824,18 @@ struct vmbus_channel_packet_multipage_buffer {
 	struct hv_multipage_buffer range;
 } __packed;
 
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_packet_mpb_array {
+	u16 type;
+	u16 dataoffset8;
+	u16 length8;
+	u16 flags;
+	u64 transactionid;
+	u32 reserved;
+	u32 rangecount;         /* Always 1 in this case */
+	struct hv_mpb_array range;
+} __packed;
+
 
 extern int vmbus_open(struct vmbus_channel *channel,
 			    u32 send_ringbuffersize,
@@ -843,6 +867,13 @@ extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 					u32 bufferlen,
 					u64 requestid);
 
+extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+				     struct vmbus_packet_mpb_array *mpb,
+				     u32 desc_size,
+				     void *buffer,
+				     u32 bufferlen,
+				     u64 requestid);
+
 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
 				      void *kbuffer,
 				      u32 size,
-- 
1.7.4.1
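
As a rough, hypothetical sketch of how a driver could use the new call — not
part of the patch; the helper name and the PFN-count calculation below are
assumptions — a caller might size the descriptor from the offset/length pair
of the transfer and let vmbus_sendpacket_mpb_desc() fill in the packet header:

/* Hypothetical caller of vmbus_sendpacket_mpb_desc(); not part of the patch. */
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pfn.h>
#include <linux/hyperv.h>

static int example_send_mpb(struct vmbus_channel *chan,
			    void *data, u32 len,
			    void *pkt, u32 pktlen, u64 reqid)
{
	struct vmbus_packet_mpb_array *desc;
	unsigned long addr = (unsigned long)data;
	u32 offset = addr & ~PAGE_MASK;
	u32 pfn_count = PFN_UP(offset + len);	/* pages spanned by the buffer */
	u32 desc_size = sizeof(*desc) + pfn_count * sizeof(u64);
	u32 i;
	int ret;

	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* Only the range needs filling; the send path sets the header fields. */
	desc->range.offset = offset;
	desc->range.len = len;
	/* Assumes 'data' lies in the kernel linear mapping (e.g. kmalloc'd). */
	for (i = 0; i < pfn_count; i++)
		desc->range.pfn_array[i] =
			virt_to_phys((void *)((addr & PAGE_MASK) +
					      i * PAGE_SIZE)) >> PAGE_SHIFT;

	ret = vmbus_sendpacket_mpb_desc(chan, desc, desc_size,
					pkt, pktlen, reqid);
	kfree(desc);
	return ret;
}

With the existing vmbus_sendpacket_multipagebuffer() interface the pfn array is
a fixed MAX_MULTIPAGE_BUFFER_COUNT-entry array, so larger payloads cannot be
described; with the flexible pfn_array[] the descriptor is simply allocated to
whatever size the transfer needs.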

