From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, yliu@fridaylinux.org, jfreiman@redhat.com, tiwei.bie@intel.com
Cc: mst@redhat.com, vkaplans@redhat.com, jasowang@redhat.com,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH 08/21] vhost: iotlb: add pending miss request list and helpers
Date: Thu, 31 Aug 2017 11:50:10 +0200
Message-ID: <20170831095023.21037-9-maxime.coquelin@redhat.com>
In-Reply-To: <20170831095023.21037-1-maxime.coquelin@redhat.com>
References: <20170831095023.21037-1-maxime.coquelin@redhat.com>

In order to be able to handle other ports or queues while waiting
for an IOTLB miss reply, a pending list is created so that the
waiter can return and retry later by sending the miss request again.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/iotlb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--
 lib/librte_vhost/iotlb.h |  4 +++
 lib/librte_vhost/vhost.h |  1 +
 3 files changed, 91 insertions(+), 2 deletions(-)

diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
index 1b739dae5..d014bfe98 100644
--- a/lib/librte_vhost/iotlb.c
+++ b/lib/librte_vhost/iotlb.c
@@ -49,7 +49,86 @@ struct vhost_iotlb_entry {
 	uint8_t perm;
 };
 
-#define IOTLB_CACHE_SIZE 1024
+#define IOTLB_CACHE_SIZE 2048
+
+static void vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
+{
+	struct vhost_iotlb_entry *node, *temp_node;
+
+	rte_rwlock_write_lock(&vq->iotlb_lock);
+
+	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+		rte_mempool_put(vq->iotlb_pool, node);
+	}
+
+	rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+				uint8_t perm)
+{
+	struct vhost_iotlb_entry *node;
+	int found = 0;
+
+	rte_rwlock_read_lock(&vq->iotlb_lock);
+
+	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
+		if ((node->iova == iova) && (node->perm == perm)) {
+			found = 1;
+			break;
+		}
+	}
+
+	rte_rwlock_read_unlock(&vq->iotlb_lock);
+
+	return found;
+}
+
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
+				uint64_t iova, uint8_t perm)
+{
+	struct vhost_iotlb_entry *node;
+	int ret;
+
+	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+	if (ret) {
+		RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");
+		vhost_user_iotlb_pending_remove_all(vq);
+		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+		if (ret) {
+			RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
+			return;
+		}
+	}
+
+	node->iova = iova;
+	node->perm = perm;
+
+	rte_rwlock_write_lock(&vq->iotlb_lock);
+
+	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
+
+	rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+static void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
+				uint64_t iova, uint64_t size, uint8_t perm)
+{
+	struct vhost_iotlb_entry *node, *temp_node;
+
+	/* .iotlb_lock already locked by the caller */
+	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+		if (node->iova < iova)
+			continue;
+		if (node->iova >= iova + size)
+			continue;
+		if ((node->perm & perm) != node->perm)
+			continue;
+		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+		rte_mempool_put(vq->iotlb_pool, node);
+	}
+}
 
 static void vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
 {
@@ -106,7 +185,10 @@ void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
 	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
 
 unlock:
+	vhost_user_iotlb_pending_remove(vq, iova, size, perm);
+
 	rte_rwlock_write_unlock(&vq->iotlb_lock);
+
 }
 
 void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
@@ -189,9 +271,10 @@ int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 	if (vq->iotlb_pool) {
 		/*
 		 * The cache has already been initialized,
-		 * just drop all entries
+		 * just drop all cached and pending entries.
 		 */
 		vhost_user_iotlb_cache_remove_all(vq);
+		vhost_user_iotlb_pending_remove_all(vq);
 		return 0;
 	}
 
@@ -204,6 +287,7 @@ int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 	rte_rwlock_init(&vq->iotlb_lock);
 
 	TAILQ_INIT(&vq->iotlb_list);
+	TAILQ_INIT(&vq->iotlb_pending_list);
 
 	snprintf(pool_name, sizeof(pool_name), "iotlb_cache_%d_%d",
 			dev->vid, vq_index);
diff --git a/lib/librte_vhost/iotlb.h b/lib/librte_vhost/iotlb.h
index 459820762..4be1f7e85 100644
--- a/lib/librte_vhost/iotlb.h
+++ b/lib/librte_vhost/iotlb.h
@@ -41,6 +41,10 @@ void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
 					uint64_t iova, uint64_t size);
 uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
 					uint64_t *size, uint8_t perm);
+int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+					uint8_t perm);
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
+					uint8_t perm);
 int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
 
 #endif /* _VHOST_IOTLB_H_ */
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 7816a92b5..a41bacea7 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -119,6 +119,7 @@ struct vhost_virtqueue {
 	rte_rwlock_t	iotlb_lock;
 	struct rte_mempool *iotlb_pool;
 	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
 } __rte_cache_aligned;
 
 /* Old kernels have no such macros defined */
-- 
2.13.3
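
P.S. For reviewers, here is a minimal, illustrative sketch (not part of
this patch) of how a caller-side translation path could combine the new
pending helpers with the existing cache lookup, per the commit message:
check the cache first, and on a miss only record and send a request if
none is already pending, then return so other ports/queues can be
serviced. The send_iotlb_miss() helper and the function name are
hypothetical placeholders for however the miss request is actually
forwarded to the vhost-user master; everything else comes from iotlb.h.

	#include "vhost.h"
	#include "iotlb.h"

	/* Sketch only: send_iotlb_miss() is a hypothetical placeholder. */
	static uint64_t
	translate_iova_or_defer(struct vhost_virtqueue *vq, uint64_t iova,
				uint64_t *size, uint8_t perm)
	{
		uint64_t vva;

		/* Fast path: translation already in the IOTLB cache. */
		vva = vhost_user_iotlb_cache_find(vq, iova, size, perm);
		if (vva)
			return vva;

		/*
		 * Miss: send a request only if none is already pending for
		 * this (iova, perm), and record it in the pending list so
		 * the caller can return instead of busy-waiting.
		 */
		if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
			vhost_user_iotlb_pending_insert(vq, iova, perm);
			send_iotlb_miss(vq, iova, perm); /* hypothetical */
		}

		return 0; /* caller retries later, re-sending the miss if needed */
	}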