From: Jason Wang <jasowang@redhat.com>
To: Christophe de Dinechin <christophe.de.dinechin@gmail.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
	KVM list <kvm@vger.kernel.org>,
	"open list:VIRTIO GPU DRIVER"
	<virtualization@lists.linux-foundation.org>,
	netdev@vger.kernel.org, open list <linux-kernel@vger.kernel.org>,
	Peter Xu <peterx@redhat.com>,
	linux-mm@kvack.org, aarcange@redhat.com
Subject: Re: [RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
Date: Thu, 7 Mar 2019 10:38:30 +0800
Message-ID: <ba91cc01-4e48-2743-f6ef-4aad4b821eb6@redhat.com>
In-Reply-To: <4C1386C5-F153-43DD-8B14-CC752FA5A07A@dinechin.org>


On 2019/3/6 6:45 PM, Christophe de Dinechin wrote:
>
>> On 6 Mar 2019, at 08:18, Jason Wang <jasowang@redhat.com> wrote:
>>
>> This is used to hide the metadata address from virtqueue helpers. This
>> will allow implementing vmap-based fast access to metadata.
>>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> drivers/vhost/vhost.c | 94 +++++++++++++++++++++++++++++++++++++++++----------
>> 1 file changed, 77 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
>> index 400aa78..29709e7 100644
>> --- a/drivers/vhost/vhost.c
>> +++ b/drivers/vhost/vhost.c
>> @@ -869,6 +869,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
>> 	ret; \
>> })
>>
>> +static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
>> +{
>> +	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
>> +			      vhost_avail_event(vq));
>> +}
>> +
>> +static inline int vhost_put_used(struct vhost_virtqueue *vq,
>> +				 struct vring_used_elem *head, int idx,
>> +				 int count)
>> +{
>> +	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
>> +				  count * sizeof(*head));
>> +}
>> +
>> +static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
>> +
>> +{
>> +	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
>> +			      &vq->used->flags);
>> +}
>> +
>> +static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
>> +
>> +{
>> +	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
>> +			      &vq->used->idx);
>> +}
>> +
>> #define vhost_get_user(vq, x, ptr, type)		\
>> ({ \
>> 	int ret; \
>> @@ -907,6 +935,43 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
>> 		mutex_unlock(&d->vqs[i]->mutex);
>> }
>>
>> +static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
>> +				      __virtio16 *idx)
>> +{
>> +	return vhost_get_avail(vq, *idx, &vq->avail->idx);
>> +}
>> +
>> +static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
>> +				       __virtio16 *head, int idx)
>> +{
>> +	return vhost_get_avail(vq, *head,
>> +			       &vq->avail->ring[idx & (vq->num - 1)]);
>> +}
>> +
>> +static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
>> +					__virtio16 *flags)
>> +{
>> +	return vhost_get_avail(vq, *flags, &vq->avail->flags);
>> +}
>> +
>> +static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
>> +				       __virtio16 *event)
>> +{
>> +	return vhost_get_avail(vq, *event, vhost_used_event(vq));
>> +}
>> +
>> +static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
>> +				     __virtio16 *idx)
>> +{
>> +	return vhost_get_used(vq, *idx, &vq->used->idx);
>> +}
>> +
>> +static inline int vhost_get_desc(struct vhost_virtqueue *vq,
>> +				 struct vring_desc *desc, int idx)
>> +{
>> +	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
>> +}
>> +
>> static int vhost_new_umem_range(struct vhost_umem *umem,
>> 				u64 start, u64 size, u64 end,
>> 				u64 userspace_addr, int perm)
>> @@ -1840,8 +1905,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
>> static int vhost_update_used_flags(struct vhost_virtqueue *vq)
>> {
>> 	void __user *used;
>> -	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
>> -			   &vq->used->flags) < 0)
>> +	if (vhost_put_used_flags(vq))
>> 		return -EFAULT;
>> 	if (unlikely(vq->log_used)) {
>> 		/* Make sure the flag is seen before log. */
>> @@ -1858,8 +1922,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
>>
>> static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
>> {
>> -	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
>> -			   vhost_avail_event(vq)))
>> +	if (vhost_put_avail_event(vq))
>> 		return -EFAULT;
>> 	if (unlikely(vq->log_used)) {
>> 		void __user *used;
>> @@ -1895,7 +1958,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
>> 		r = -EFAULT;
>> 		goto err;
>> 	}
>> -	r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
>> +	r = vhost_get_used_idx(vq, &last_used_idx);
>> 	if (r) {
>> 		vq_err(vq, "Can't access used idx at %p\n",
>> 		       &vq->used->idx);
> From the error case, it looks like you are not entirely encapsulating
> knowledge of what the accessor uses, i.e. it’s not:
>
> 		vq_err(vq, "Can't access used idx at %p\n",
> 		       &last_used_idx);
>
> Maybe move the error message into the accessor?


Good catch. Will fix, but I still prefer to keep vq_err() where it is.
Moving the error message (if needed) could be done in the future.
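
If we do move it later, the accessor would end up looking roughly like
this (just a sketch on top of this patch, not something I plan to post
now):

	/* Sketch only: same accessor as in this patch, but with the
	 * vq_err() pulled inside, so callers never need to know the
	 * raw userspace address being accessed.
	 */
	static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
					     __virtio16 *idx)
	{
		int ret = vhost_get_used(vq, *idx, &vq->used->idx);

		if (unlikely(ret))
			vq_err(vq, "Can't access used idx at %p\n",
			       &vq->used->idx);
		return ret;
	}

Callers would then only check the return value, e.g. the check in
vhost_vq_init_access() would collapse to something like:

	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r)
		goto err;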

Thanks


>
>> @@ -2094,7 +2157,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>> 	last_avail_idx = vq->last_avail_idx;
>>
>> 	if (vq->avail_idx == vq->last_avail_idx) {
>> -		if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
>> +		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
>> 			vq_err(vq, "Failed to access avail idx at %p\n",
>> 				&vq->avail->idx);
>> 			return -EFAULT;
> Same here.
>
>> @@ -2121,8 +2184,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>>
>> 	/* Grab the next descriptor number they're advertising, and increment
>> 	 * the index we've seen. */
>> -	if (unlikely(vhost_get_avail(vq, ring_head,
>> -		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
>> +	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
>> 		vq_err(vq, "Failed to read head: idx %d address %p\n",
>> 		       last_avail_idx,
>> 		       &vq->avail->ring[last_avail_idx % vq->num]);
>> @@ -2157,8 +2219,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>> 			       i, vq->num, head);
>> 			return -EINVAL;
>> 		}
>> -		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
>> -					   sizeof desc);
>> +		ret = vhost_get_desc(vq, &desc, i);
>> 		if (unlikely(ret)) {
>> 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
>> 			       i, vq->desc + i);
>> @@ -2251,7 +2312,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
>>
>> 	start = vq->last_used_idx & (vq->num - 1);
>> 	used = vq->used->ring + start;
>> -	if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
>> +	if (vhost_put_used(vq, heads, start, count)) {
>> 		vq_err(vq, "Failed to write used");
>> 		return -EFAULT;
>> 	}
>> @@ -2293,8 +2354,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
>>
>> 	/* Make sure buffer is written before we update index. */
>> 	smp_wmb();
>> -	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
>> -			   &vq->used->idx)) {
>> +	if (vhost_put_used_idx(vq)) {
>> 		vq_err(vq, "Failed to increment used idx");
>> 		return -EFAULT;
>> 	}
>> @@ -2327,7 +2387,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>>
>> 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
>> 		__virtio16 flags;
>> -		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
>> +		if (vhost_get_avail_flags(vq, &flags)) {
>> 			vq_err(vq, "Failed to get flags");
>> 			return true;
>> 		}
>> @@ -2341,7 +2401,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> 	if (unlikely(!v))
>> 		return true;
>>
>> -	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
>> +	if (vhost_get_used_event(vq, &event)) {
>> 		vq_err(vq, "Failed to get used event idx");
>> 		return true;
>> 	}
>> @@ -2386,7 +2446,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> 	if (vq->avail_idx != vq->last_avail_idx)
>> 		return false;
>>
>> -	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
>> +	r = vhost_get_avail_idx(vq, &avail_idx);
>> 	if (unlikely(r))
>> 		return false;
>> 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
>> @@ -2422,7 +2482,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> 	/* They could have slipped one in as we were doing that: make
>> 	 * sure it's written, then check again. */
>> 	smp_mb();
>> -	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
>> +	r = vhost_get_avail_idx(vq, &avail_idx);
>> 	if (r) {
>> 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
>> 		       &vq->avail->idx, r);
>> -- 
>> 1.8.3.1
>>



Thread overview: 78+ messages
2019-03-06  7:18 [RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap() Jason Wang
2019-03-06  7:18 ` [RFC PATCH V2 1/5] vhost: generalize adding used elem Jason Wang
2019-03-06  7:18 ` [RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors Jason Wang
2019-03-06 10:45   ` Christophe de Dinechin
2019-03-07  2:38     ` Jason Wang [this message]
2019-03-06  7:18 ` [RFC PATCH V2 3/5] vhost: rename vq_iotlb_prefetch() to vq_meta_prefetch() Jason Wang
2019-03-06  7:18 ` [RFC PATCH V2 4/5] vhost: introduce helpers to get the size of metadata area Jason Wang
2019-03-06 10:56   ` Christophe de Dinechin
2019-03-07  2:40     ` Jason Wang
2019-03-06 18:43   ` Souptick Joarder
2019-03-07  2:42     ` Jason Wang
2019-03-06  7:18 ` [RFC PATCH V2 5/5] vhost: access vq metadata through kernel virtual address Jason Wang
2019-03-06 16:31   ` Michael S. Tsirkin
2019-03-07  2:45     ` Jason Wang
2019-03-07 15:34       ` Michael S. Tsirkin
2019-03-07 19:09         ` Jerome Glisse
2019-03-07 19:38           ` Andrea Arcangeli
2019-03-07 20:17             ` Jerome Glisse
2019-03-07 21:27               ` Andrea Arcangeli
2019-03-08  9:13                 ` Jason Wang
2019-03-08 19:11                   ` Andrea Arcangeli
2019-03-11  7:21                     ` Jason Wang
2019-03-11 14:45                 ` Jan Kara
2019-03-08  8:31         ` Jason Wang
2019-03-07 15:47   ` Michael S. Tsirkin
2019-03-07 17:56     ` Michael S. Tsirkin
2019-03-07 19:16       ` Andrea Arcangeli
2019-03-08  8:50         ` Jason Wang
2019-03-08 14:58           ` Jerome Glisse
2019-03-11  7:18             ` Jason Wang
2019-03-08 19:48           ` Andrea Arcangeli
2019-03-08 20:06             ` Jerome Glisse
2019-03-11  7:40             ` Jason Wang
2019-03-11 12:48               ` Michael S. Tsirkin
2019-03-11 13:43                 ` Andrea Arcangeli
2019-03-12  2:56                   ` Jason Wang
2019-03-12  3:51                     ` Michael S. Tsirkin
2019-03-12  2:52                 ` Jason Wang
2019-03-12  3:50                   ` Michael S. Tsirkin
2019-03-12  7:15                     ` Jason Wang
2019-03-07 19:17       ` Jerome Glisse
2019-03-08  2:21         ` Michael S. Tsirkin
2019-03-08  2:55           ` Jerome Glisse
2019-03-08  3:16             ` Michael S. Tsirkin
2019-03-08  3:40               ` Jerome Glisse
2019-03-08  3:43                 ` Michael S. Tsirkin
2019-03-08  3:45                   ` Jerome Glisse
2019-03-08  9:15                     ` Jason Wang
2019-03-08  8:58         ` Jason Wang
2019-03-08 12:56           ` Michael S. Tsirkin
2019-03-08 15:02             ` Jerome Glisse
2019-03-08 19:13           ` Andrea Arcangeli
2019-03-08 14:12 ` [RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap() Christoph Hellwig
2019-03-11  7:13   ` Jason Wang
2019-03-11 13:59     ` Michael S. Tsirkin
2019-03-11 18:14       ` David Miller
2019-03-12  2:59         ` Jason Wang
2019-03-12  3:52           ` Michael S. Tsirkin
2019-03-12  7:17             ` Jason Wang
2019-03-12 11:54               ` Michael S. Tsirkin
2019-03-12 15:46                 ` James Bottomley
2019-03-12 20:04                   ` Andrea Arcangeli
2019-03-12 20:53                     ` James Bottomley
2019-03-12 21:11                       ` Andrea Arcangeli
2019-03-12 21:19                         ` James Bottomley
2019-03-12 21:53                           ` Andrea Arcangeli
2019-03-12 22:02                             ` James Bottomley
2019-03-12 22:50                               ` Andrea Arcangeli
2019-03-12 22:57                                 ` James Bottomley
2019-03-13 16:05                       ` Christoph Hellwig
2019-03-13 16:37                         ` James Bottomley
2019-03-14 10:42                           ` Michael S. Tsirkin
2019-03-14 13:49                             ` Jason Wang
2019-03-14 19:33                               ` Andrea Arcangeli
2019-03-15  4:39                                 ` Jason Wang
2019-03-12  5:14           ` James Bottomley
2019-03-12  7:51             ` Jason Wang
2019-03-12  7:53               ` Jason Wang
