* [PATCH v3 24/41] vhost: add memory access wrappers [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 25/41] vhost/net: force len for TX to host endian Michael S. Tsirkin ` (7 subsequent siblings) 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel; +Cc: kvm, rusty, netdev, virtualization, pbonzini, David Miller Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/vhost.h | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 3eda654..b9032e8 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -174,6 +174,37 @@ enum { static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit) { - return vq->acked_features & (1 << bit); + return vq->acked_features & (1ULL << bit); +} + +/* Memory accessors */ +static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val) +{ + return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val) +{ + return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val); +} + +static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val) +{ + return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val) +{ + return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val); +} + +static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val) +{ + return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val) +{ + return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val); } #endif -- MST ^ permalink 
raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 25/41] vhost/net: force len for TX to host endian [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> 2014-11-24 11:54 ` [PATCH v3 24/41] vhost: add memory access wrappers Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support Michael S. Tsirkin ` (6 subsequent siblings) 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev We use native endian-ness internally but never expose it to guest. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/net.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 8dae2f7..dce5c58 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -48,15 +48,15 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;" * status internally; used for zerocopy tx only. */ /* Lower device DMA failed */ -#define VHOST_DMA_FAILED_LEN 3 +#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3) /* Lower device DMA done */ -#define VHOST_DMA_DONE_LEN 2 +#define VHOST_DMA_DONE_LEN ((__force __virtio32)2) /* Lower device DMA in progress */ -#define VHOST_DMA_IN_PROGRESS 1 +#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1) /* Buffer unused */ -#define VHOST_DMA_CLEAR_LEN 0 +#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0) -#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN) +#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN) enum { VHOST_NET_FEATURES = VHOST_FEATURES | -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> 2014-11-24 11:54 ` [PATCH v3 24/41] vhost: add memory access wrappers Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 25/41] vhost/net: force len for TX to host endian Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 14:28 ` Cedric Le Goater 2014-11-24 11:54 ` [PATCH v3 27/41] vhost: make features 64 bit Michael S. Tsirkin ` (5 subsequent siblings) 8 siblings, 1 reply; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/vhost.c | 93 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 37 deletions(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index c90f437..4d379ed 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -33,8 +33,8 @@ enum { VHOST_MEMORY_F_LOG = 0x1, }; -#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num]) -#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num]) +#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) +#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt) @@ -1001,7 +1001,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write); static int vhost_update_used_flags(struct vhost_virtqueue *vq) { void __user *used; - if (__put_user(vq->used_flags, &vq->used->flags) < 0) + if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0) return -EFAULT; if (unlikely(vq->log_used)) { /* Make sure the flag is seen before log. 
*/ @@ -1019,7 +1019,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) { - if (__put_user(vq->avail_idx, vhost_avail_event(vq))) + if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq))) return -EFAULT; if (unlikely(vq->log_used)) { void __user *used; @@ -1038,6 +1038,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) int vhost_init_used(struct vhost_virtqueue *vq) { + __virtio16 last_used_idx; int r; if (!vq->private_data) return 0; @@ -1046,7 +1047,13 @@ int vhost_init_used(struct vhost_virtqueue *vq) if (r) return r; vq->signalled_used_valid = false; - return get_user(vq->last_used_idx, &vq->used->idx); + if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) + return -EFAULT; + r = __get_user(last_used_idx, &vq->used->idx); + if (r) + return r; + vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); + return 0; } EXPORT_SYMBOL_GPL(vhost_init_used); @@ -1087,16 +1094,16 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, /* Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, * or -1U if we're at the end. */ -static unsigned next_desc(struct vring_desc *desc) +static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ - if (!(desc->flags & VRING_DESC_F_NEXT)) + if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) return -1U; /* Check they're not leading us off end of descriptors. */ - next = desc->next; + next = vhost16_to_cpu(vq, desc->next); /* Make sure compiler knows to grab that: we don't want it changing! */ /* We will use the result as an index in an array, so most * architectures only need a compiler barrier here. 
*/ @@ -1113,18 +1120,19 @@ static int get_indirect(struct vhost_virtqueue *vq, { struct vring_desc desc; unsigned int i = 0, count, found = 0; + u32 len = vhost32_to_cpu(vq, indirect->len); int ret; /* Sanity check */ - if (unlikely(indirect->len % sizeof desc)) { + if (unlikely(len % sizeof desc)) { vq_err(vq, "Invalid length in indirect descriptor: " "len 0x%llx not multiple of 0x%zx\n", - (unsigned long long)indirect->len, + (unsigned long long)vhost32_to_cpu(vq, indirect->len), sizeof desc); return -EINVAL; } - ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect, + ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, UIO_MAXIOV); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d in indirect.\n", ret); @@ -1135,7 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq, * architectures only need a compiler barrier here. */ read_barrier_depends(); - count = indirect->len / sizeof desc; + count = len / sizeof desc; /* Buffers are chained via a 16 bit next field, so * we can have at most 2^16 of these. 
*/ if (unlikely(count > USHRT_MAX + 1)) { @@ -1155,16 +1163,17 @@ static int get_indirect(struct vhost_virtqueue *vq, if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect, sizeof desc))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", - i, (size_t)indirect->addr + i * sizeof desc); + i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; } - if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) { + if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", - i, (size_t)indirect->addr + i * sizeof desc); + i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; } - ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count, + ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), + vhost32_to_cpu(vq, desc.len), iov + iov_count, iov_size - iov_count); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d indirect idx %d\n", @@ -1172,11 +1181,11 @@ static int get_indirect(struct vhost_virtqueue *vq, return ret; } /* If this is an input descriptor, increment that count. */ - if (desc.flags & VRING_DESC_F_WRITE) { + if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { *in_num += ret; if (unlikely(log)) { - log[*log_num].addr = desc.addr; - log[*log_num].len = desc.len; + log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); + log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; } } else { @@ -1189,7 +1198,7 @@ static int get_indirect(struct vhost_virtqueue *vq, } *out_num += ret; } - } while ((i = next_desc(&desc)) != -1); + } while ((i = next_desc(vq, &desc)) != -1); return 0; } @@ -1209,15 +1218,18 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, struct vring_desc desc; unsigned int i, head, found = 0; u16 last_avail_idx; + __virtio16 avail_idx; + __virtio16 ring_head; int ret; /* Check it isn't doing very strange things with descriptor numbers. 
*/ last_avail_idx = vq->last_avail_idx; - if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) { + if (unlikely(__get_user(avail_idx, &vq->avail->idx))) { vq_err(vq, "Failed to access avail idx at %p\n", &vq->avail->idx); return -EFAULT; } + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { vq_err(vq, "Guest moved used index from %u to %u", @@ -1234,7 +1246,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* Grab the next descriptor number they're advertising, and increment * the index we've seen. */ - if (unlikely(__get_user(head, + if (unlikely(__get_user(ring_head, &vq->avail->ring[last_avail_idx % vq->num]))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, @@ -1242,6 +1254,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, return -EFAULT; } + head = vhost16_to_cpu(vq, ring_head); + /* If their number is silly, that's an error. */ if (unlikely(head >= vq->num)) { vq_err(vq, "Guest says index %u > %u is available", @@ -1274,7 +1288,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, i, vq->desc + i); return -EFAULT; } - if (desc.flags & VRING_DESC_F_INDIRECT) { + if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { ret = get_indirect(vq, iov, iov_size, out_num, in_num, log, log_num, &desc); @@ -1286,20 +1300,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, continue; } - ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count, + ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), + vhost32_to_cpu(vq, desc.len), iov + iov_count, iov_size - iov_count); if (unlikely(ret < 0)) { vq_err(vq, "Translation failure %d descriptor idx %d\n", ret, i); return ret; } - if (desc.flags & VRING_DESC_F_WRITE) { + if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { /* If this is an input descriptor, * increment that count. 
*/ *in_num += ret; if (unlikely(log)) { - log[*log_num].addr = desc.addr; - log[*log_num].len = desc.len; + log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); + log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; } } else { @@ -1312,7 +1327,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } *out_num += ret; } - } while ((i = next_desc(&desc)) != -1); + } while ((i = next_desc(vq, &desc)) != -1); /* On success, increment avail index. */ vq->last_avail_idx++; @@ -1335,7 +1350,10 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); * want to notify the guest, using eventfd. */ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) { - struct vring_used_elem heads = { head, len }; + struct vring_used_elem heads = { + cpu_to_vhost32(vq, head), + cpu_to_vhost32(vq, len) + }; return vhost_add_used_n(vq, &heads, 1); } @@ -1404,7 +1422,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, /* Make sure buffer is written before we update index. */ smp_wmb(); - if (put_user(vq->last_used_idx, &vq->used->idx)) { + if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) { vq_err(vq, "Failed to increment used idx"); return -EFAULT; } @@ -1422,7 +1440,8 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n); static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { - __u16 old, new, event; + __u16 old, new; + __virtio16 event; bool v; /* Flush out used index updates. 
This is paired * with the barrier that the Guest executes when enabling @@ -1434,12 +1453,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) return true; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { - __u16 flags; + __virtio16 flags; if (__get_user(flags, &vq->avail->flags)) { vq_err(vq, "Failed to get flags"); return true; } - return !(flags & VRING_AVAIL_F_NO_INTERRUPT); + return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); } old = vq->signalled_used; v = vq->signalled_used_valid; @@ -1449,11 +1468,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (unlikely(!v)) return true; - if (get_user(event, vhost_used_event(vq))) { + if (__get_user(event, vhost_used_event(vq))) { vq_err(vq, "Failed to get used event idx"); return true; } - return vring_need_event(event, new, old); + return vring_need_event(vhost16_to_cpu(vq, event), new, old); } /* This actually signals the guest, using eventfd. */ @@ -1488,7 +1507,7 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); /* OK, now we need to know about added descriptors. */ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { - u16 avail_idx; + __virtio16 avail_idx; int r; if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) @@ -1519,7 +1538,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) return false; } - return avail_idx != vq->avail_idx; + return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; } EXPORT_SYMBOL_GPL(vhost_enable_notify); -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support 2014-11-24 11:54 ` [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support Michael S. Tsirkin @ 2014-11-24 14:28 ` Cedric Le Goater 2014-11-24 20:28 ` Michael S. Tsirkin 0 siblings, 1 reply; 11+ messages in thread From: Cedric Le Goater @ 2014-11-24 14:28 UTC (permalink / raw) To: Michael S. Tsirkin, linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev Hi Michael, Do you have a tree from where I could pull these patches ? Thanks, C. On 11/24/2014 12:54 PM, Michael S. Tsirkin wrote: > Signed-off-by: Michael S. Tsirkin <mst@redhat.com> > --- > drivers/vhost/vhost.c | 93 +++++++++++++++++++++++++++++++-------------------- > 1 file changed, 56 insertions(+), 37 deletions(-) > > diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c > index c90f437..4d379ed 100644 > --- a/drivers/vhost/vhost.c > +++ b/drivers/vhost/vhost.c > @@ -33,8 +33,8 @@ enum { > VHOST_MEMORY_F_LOG = 0x1, > }; > > -#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num]) > -#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num]) > +#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) > +#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) > > static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, > poll_table *pt) > @@ -1001,7 +1001,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write); > static int vhost_update_used_flags(struct vhost_virtqueue *vq) > { > void __user *used; > - if (__put_user(vq->used_flags, &vq->used->flags) < 0) > + if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0) > return -EFAULT; > if (unlikely(vq->log_used)) { > /* Make sure the flag is seen before log. 
*/ > @@ -1019,7 +1019,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) > > static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) > { > - if (__put_user(vq->avail_idx, vhost_avail_event(vq))) > + if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq))) > return -EFAULT; > if (unlikely(vq->log_used)) { > void __user *used; > @@ -1038,6 +1038,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) > > int vhost_init_used(struct vhost_virtqueue *vq) > { > + __virtio16 last_used_idx; > int r; > if (!vq->private_data) > return 0; > @@ -1046,7 +1047,13 @@ int vhost_init_used(struct vhost_virtqueue *vq) > if (r) > return r; > vq->signalled_used_valid = false; > - return get_user(vq->last_used_idx, &vq->used->idx); > + if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) > + return -EFAULT; > + r = __get_user(last_used_idx, &vq->used->idx); > + if (r) > + return r; > + vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); > + return 0; > } > EXPORT_SYMBOL_GPL(vhost_init_used); > > @@ -1087,16 +1094,16 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, > /* Each buffer in the virtqueues is actually a chain of descriptors. This > * function returns the next descriptor in the chain, > * or -1U if we're at the end. */ > -static unsigned next_desc(struct vring_desc *desc) > +static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) > { > unsigned int next; > > /* If this descriptor says it doesn't chain, we're done. */ > - if (!(desc->flags & VRING_DESC_F_NEXT)) > + if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) > return -1U; > > /* Check they're not leading us off end of descriptors. */ > - next = desc->next; > + next = vhost16_to_cpu(vq, desc->next); > /* Make sure compiler knows to grab that: we don't want it changing! 
*/ > /* We will use the result as an index in an array, so most > * architectures only need a compiler barrier here. */ > @@ -1113,18 +1120,19 @@ static int get_indirect(struct vhost_virtqueue *vq, > { > struct vring_desc desc; > unsigned int i = 0, count, found = 0; > + u32 len = vhost32_to_cpu(vq, indirect->len); > int ret; > > /* Sanity check */ > - if (unlikely(indirect->len % sizeof desc)) { > + if (unlikely(len % sizeof desc)) { > vq_err(vq, "Invalid length in indirect descriptor: " > "len 0x%llx not multiple of 0x%zx\n", > - (unsigned long long)indirect->len, > + (unsigned long long)vhost32_to_cpu(vq, indirect->len), > sizeof desc); > return -EINVAL; > } > > - ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect, > + ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, > UIO_MAXIOV); > if (unlikely(ret < 0)) { > vq_err(vq, "Translation failure %d in indirect.\n", ret); > @@ -1135,7 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq, > * architectures only need a compiler barrier here. */ > read_barrier_depends(); > > - count = indirect->len / sizeof desc; > + count = len / sizeof desc; > /* Buffers are chained via a 16 bit next field, so > * we can have at most 2^16 of these. 
*/ > if (unlikely(count > USHRT_MAX + 1)) { > @@ -1155,16 +1163,17 @@ static int get_indirect(struct vhost_virtqueue *vq, > if (unlikely(memcpy_fromiovec((unsigned char *)&desc, > vq->indirect, sizeof desc))) { > vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", > - i, (size_t)indirect->addr + i * sizeof desc); > + i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); > return -EINVAL; > } > - if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) { > + if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { > vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", > - i, (size_t)indirect->addr + i * sizeof desc); > + i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); > return -EINVAL; > } > > - ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count, > + ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), > + vhost32_to_cpu(vq, desc.len), iov + iov_count, > iov_size - iov_count); > if (unlikely(ret < 0)) { > vq_err(vq, "Translation failure %d indirect idx %d\n", > @@ -1172,11 +1181,11 @@ static int get_indirect(struct vhost_virtqueue *vq, > return ret; > } > /* If this is an input descriptor, increment that count. 
*/ > - if (desc.flags & VRING_DESC_F_WRITE) { > + if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { > *in_num += ret; > if (unlikely(log)) { > - log[*log_num].addr = desc.addr; > - log[*log_num].len = desc.len; > + log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); > + log[*log_num].len = vhost32_to_cpu(vq, desc.len); > ++*log_num; > } > } else { > @@ -1189,7 +1198,7 @@ static int get_indirect(struct vhost_virtqueue *vq, > } > *out_num += ret; > } > - } while ((i = next_desc(&desc)) != -1); > + } while ((i = next_desc(vq, &desc)) != -1); > return 0; > } > > @@ -1209,15 +1218,18 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > struct vring_desc desc; > unsigned int i, head, found = 0; > u16 last_avail_idx; > + __virtio16 avail_idx; > + __virtio16 ring_head; > int ret; > > /* Check it isn't doing very strange things with descriptor numbers. */ > last_avail_idx = vq->last_avail_idx; > - if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) { > + if (unlikely(__get_user(avail_idx, &vq->avail->idx))) { > vq_err(vq, "Failed to access avail idx at %p\n", > &vq->avail->idx); > return -EFAULT; > } > + vq->avail_idx = vhost16_to_cpu(vq, avail_idx); > > if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { > vq_err(vq, "Guest moved used index from %u to %u", > @@ -1234,7 +1246,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > > /* Grab the next descriptor number they're advertising, and increment > * the index we've seen. */ > - if (unlikely(__get_user(head, > + if (unlikely(__get_user(ring_head, > &vq->avail->ring[last_avail_idx % vq->num]))) { > vq_err(vq, "Failed to read head: idx %d address %p\n", > last_avail_idx, > @@ -1242,6 +1254,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > return -EFAULT; > } > > + head = vhost16_to_cpu(vq, ring_head); > + > /* If their number is silly, that's an error. 
*/ > if (unlikely(head >= vq->num)) { > vq_err(vq, "Guest says index %u > %u is available", > @@ -1274,7 +1288,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > i, vq->desc + i); > return -EFAULT; > } > - if (desc.flags & VRING_DESC_F_INDIRECT) { > + if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { > ret = get_indirect(vq, iov, iov_size, > out_num, in_num, > log, log_num, &desc); > @@ -1286,20 +1300,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > continue; > } > > - ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count, > + ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), > + vhost32_to_cpu(vq, desc.len), iov + iov_count, > iov_size - iov_count); > if (unlikely(ret < 0)) { > vq_err(vq, "Translation failure %d descriptor idx %d\n", > ret, i); > return ret; > } > - if (desc.flags & VRING_DESC_F_WRITE) { > + if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) { > /* If this is an input descriptor, > * increment that count. */ > *in_num += ret; > if (unlikely(log)) { > - log[*log_num].addr = desc.addr; > - log[*log_num].len = desc.len; > + log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); > + log[*log_num].len = vhost32_to_cpu(vq, desc.len); > ++*log_num; > } > } else { > @@ -1312,7 +1327,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, > } > *out_num += ret; > } > - } while ((i = next_desc(&desc)) != -1); > + } while ((i = next_desc(vq, &desc)) != -1); > > /* On success, increment avail index. */ > vq->last_avail_idx++; > @@ -1335,7 +1350,10 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); > * want to notify the guest, using eventfd. 
*/ > int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) > { > - struct vring_used_elem heads = { head, len }; > + struct vring_used_elem heads = { > + cpu_to_vhost32(vq, head), > + cpu_to_vhost32(vq, len) > + }; > > return vhost_add_used_n(vq, &heads, 1); > } > @@ -1404,7 +1422,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, > > /* Make sure buffer is written before we update index. */ > smp_wmb(); > - if (put_user(vq->last_used_idx, &vq->used->idx)) { > + if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) { > vq_err(vq, "Failed to increment used idx"); > return -EFAULT; > } > @@ -1422,7 +1440,8 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n); > > static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) > { > - __u16 old, new, event; > + __u16 old, new; > + __virtio16 event; > bool v; > /* Flush out used index updates. This is paired > * with the barrier that the Guest executes when enabling > @@ -1434,12 +1453,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) > return true; > > if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { > - __u16 flags; > + __virtio16 flags; > if (__get_user(flags, &vq->avail->flags)) { > vq_err(vq, "Failed to get flags"); > return true; > } > - return !(flags & VRING_AVAIL_F_NO_INTERRUPT); > + return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); > } > old = vq->signalled_used; > v = vq->signalled_used_valid; > @@ -1449,11 +1468,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) > if (unlikely(!v)) > return true; > > - if (get_user(event, vhost_used_event(vq))) { > + if (__get_user(event, vhost_used_event(vq))) { > vq_err(vq, "Failed to get used event idx"); > return true; > } > - return vring_need_event(event, new, old); > + return vring_need_event(vhost16_to_cpu(vq, event), new, old); > } > > /* This actually signals the guest, using eventfd. 
*/ > @@ -1488,7 +1507,7 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); > /* OK, now we need to know about added descriptors. */ > bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) > { > - u16 avail_idx; > + __virtio16 avail_idx; > int r; > > if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) > @@ -1519,7 +1538,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) > return false; > } > > - return avail_idx != vq->avail_idx; > + return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; > } > EXPORT_SYMBOL_GPL(vhost_enable_notify); > ^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support 2014-11-24 14:28 ` Cedric Le Goater @ 2014-11-24 20:28 ` Michael S. Tsirkin 0 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 20:28 UTC (permalink / raw) To: Cedric Le Goater Cc: kvm, rusty, netdev, linux-kernel, virtualization, pbonzini, David Miller On Mon, Nov 24, 2014 at 03:28:26PM +0100, Cedric Le Goater wrote: > Hi Michael, > > Do you have a tree from where I could pull these patches ? > > Thanks, > > C. Yes - vhost-next that linux-next includes. -- MST ^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH v3 27/41] vhost: make features 64 bit [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> ` (2 preceding siblings ...) 2014-11-24 11:54 ` [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 28/41] vhost/net: virtio 1.0 byte swap Michael S. Tsirkin ` (4 subsequent siblings) 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel; +Cc: kvm, rusty, netdev, virtualization, pbonzini, David Miller We need to use bit 32 for virtio 1.0 Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/vhost.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index b9032e8..1f321fd 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -106,7 +106,7 @@ struct vhost_virtqueue { /* Protected by virtqueue mutex. */ struct vhost_memory *memory; void *private_data; - unsigned acked_features; + u64 acked_features; /* Log write descriptors */ void __user *log_base; struct vhost_log *log; -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 28/41] vhost/net: virtio 1.0 byte swap [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> ` (3 preceding siblings ...) 2014-11-24 11:54 ` [PATCH v3 27/41] vhost: make features 64 bit Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 29/41] vhost/net: larger header for virtio 1.0 Michael S. Tsirkin ` (3 subsequent siblings) 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/net.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index dce5c58..cae22f9 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -416,7 +416,7 @@ static void handle_tx(struct vhost_net *net) struct ubuf_info *ubuf; ubuf = nvq->ubuf_info + nvq->upend_idx; - vq->heads[nvq->upend_idx].id = head; + vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; ubuf->callback = vhost_zerocopy_callback; ubuf->ctx = nvq->ubufs; @@ -500,6 +500,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, int headcount = 0; unsigned d; int r, nlogs = 0; + u32 len; while (datalen > 0 && headcount < quota) { if (unlikely(seg >= UIO_MAXIOV)) { @@ -527,13 +528,14 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, nlogs += *log_num; log += *log_num; } - heads[headcount].id = d; - heads[headcount].len = iov_length(vq->iov + seg, in); - datalen -= heads[headcount].len; + heads[headcount].id = cpu_to_vhost32(vq, d); + len = iov_length(vq->iov + seg, in); + heads[headcount].len = cpu_to_vhost32(vq, len); + datalen -= len; ++headcount; seg += in; } - heads[headcount - 1].len += datalen; + heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); *iovcount = seg; if (unlikely(log)) *log_num = nlogs; 
-- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 29/41] vhost/net: larger header for virtio 1.0 [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> ` (4 preceding siblings ...) 2014-11-24 11:54 ` [PATCH v3 28/41] vhost/net: virtio 1.0 byte swap Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 30/41] vhost/net: enable " Michael S. Tsirkin ` (2 subsequent siblings) 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/net.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index cae22f9..1ac58d0 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1027,7 +1027,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features) size_t vhost_hlen, sock_hlen, hdr_len; int i; - hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? + hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | + (1ULL << VIRTIO_F_VERSION_1))) ? sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) { -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 30/41] vhost/net: enable virtio 1.0 [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> ` (5 preceding siblings ...) 2014-11-24 11:54 ` [PATCH v3 29/41] vhost/net: larger header for virtio 1.0 Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:54 ` [PATCH v3 31/41] vhost/net: suppress compiler warning Michael S. Tsirkin 2014-11-24 11:55 ` [PATCH v3 40/41] vhost/scsi: partial virtio 1.0 support Michael S. Tsirkin 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel; +Cc: kvm, rusty, netdev, virtualization, pbonzini, David Miller Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/net.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 1ac58d0..984242e 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -61,7 +61,8 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;" enum { VHOST_NET_FEATURES = VHOST_FEATURES | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | - (1ULL << VIRTIO_NET_F_MRG_RXBUF), + (1ULL << VIRTIO_NET_F_MRG_RXBUF) | + (1ULL << VIRTIO_F_VERSION_1), }; enum { -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 31/41] vhost/net: suppress compiler warning [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> ` (6 preceding siblings ...) 2014-11-24 11:54 ` [PATCH v3 30/41] vhost/net: enable " Michael S. Tsirkin @ 2014-11-24 11:54 ` Michael S. Tsirkin 2014-11-24 11:55 ` [PATCH v3 40/41] vhost/scsi: partial virtio 1.0 support Michael S. Tsirkin 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:54 UTC (permalink / raw) To: linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev len is always initialized since function is called with size > 0. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 984242e..54ffbb0 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -501,7 +501,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, int headcount = 0; unsigned d; int r, nlogs = 0; - u32 len; + u32 uninitialized_var(len); while (datalen > 0 && headcount < quota) { if (unlikely(seg >= UIO_MAXIOV)) { -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v3 40/41] vhost/scsi: partial virtio 1.0 support [not found] <1416829787-14252-1-git-send-email-mst@redhat.com> ` (7 preceding siblings ...) 2014-11-24 11:54 ` [PATCH v3 31/41] vhost/net: suppress compiler warning Michael S. Tsirkin @ 2014-11-24 11:55 ` Michael S. Tsirkin 8 siblings, 0 replies; 11+ messages in thread From: Michael S. Tsirkin @ 2014-11-24 11:55 UTC (permalink / raw) To: linux-kernel Cc: David Miller, cornelia.huck, rusty, nab, pbonzini, kvm, virtualization, netdev Include all endian conversions as required by virtio 1.0. Don't set virtio 1.0 yet, since that requires ANY_LAYOUT which we don't yet support. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> --- drivers/vhost/scsi.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index a17f118..01c01cb 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -168,6 +168,7 @@ enum { VHOST_SCSI_VQ_IO = 2, }; +/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ enum { VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | (1ULL << VIRTIO_SCSI_F_T10_PI) @@ -577,8 +578,8 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs, return NULL; } - evt->event.event = event; - evt->event.reason = reason; + evt->event.event = cpu_to_vhost32(vq, event); + evt->event.reason = cpu_to_vhost32(vq, reason); vs->vs_events_nr++; return evt; @@ -636,7 +637,7 @@ again: } if (vs->vs_events_missed) { - event->event |= VIRTIO_SCSI_T_EVENTS_MISSED; + event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED); vs->vs_events_missed = false; } @@ -695,12 +696,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) cmd, se_cmd->residual_count, se_cmd->scsi_status); memset(&v_rsp, 0, sizeof(v_rsp)); - v_rsp.resid = se_cmd->residual_count; + v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count); /* TODO is status_qualifier field needed? 
*/ v_rsp.status = se_cmd->scsi_status; - v_rsp.sense_len = se_cmd->scsi_sense_length; + v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, + se_cmd->scsi_sense_length); memcpy(v_rsp.sense, cmd->tvc_sense_buf, - v_rsp.sense_len); + se_cmd->scsi_sense_length); ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); if (likely(ret == 0)) { struct vhost_scsi_virtqueue *q; @@ -1095,14 +1097,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) ", but wrong data_direction\n"); goto err_cmd; } - prot_bytes = v_req_pi.pi_bytesout; + prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); } else if (v_req_pi.pi_bytesin) { if (data_direction != DMA_FROM_DEVICE) { vq_err(vq, "Received non zero di_pi_niov" ", but wrong data_direction\n"); goto err_cmd; } - prot_bytes = v_req_pi.pi_bytesin; + prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); } if (prot_bytes) { int tmp = 0; @@ -1117,12 +1119,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) data_first += prot_niov; data_niov = data_num - prot_niov; } - tag = v_req_pi.tag; + tag = vhost64_to_cpu(vq, v_req_pi.tag); task_attr = v_req_pi.task_attr; cdb = &v_req_pi.cdb[0]; lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF; } else { - tag = v_req.tag; + tag = vhost64_to_cpu(vq, v_req.tag); task_attr = v_req.task_attr; cdb = &v_req.cdb[0]; lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; -- MST ^ permalink raw reply related [flat|nested] 11+ messages in thread
end of thread, other threads:[~2014-11-24 20:28 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <1416829787-14252-1-git-send-email-mst@redhat.com>
2014-11-24 11:54 ` [PATCH v3 24/41] vhost: add memory access wrappers Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 25/41] vhost/net: force len for TX to host endian Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 26/41] vhost: virtio 1.0 endian-ness support Michael S. Tsirkin
2014-11-24 14:28 ` Cedric Le Goater
2014-11-24 20:28 ` Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 27/41] vhost: make features 64 bit Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 28/41] vhost/net: virtio 1.0 byte swap Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 29/41] vhost/net: larger header for virtio 1.0 Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 30/41] vhost/net: enable " Michael S. Tsirkin
2014-11-24 11:54 ` [PATCH v3 31/41] vhost/net: suppress compiler warning Michael S. Tsirkin
2014-11-24 11:55 ` [PATCH v3 40/41] vhost/scsi: partial virtio 1.0 support Michael S. Tsirkin
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox