From: Maxime Coquelin
To: Tiwei Bie
Cc: dev@dpdk.org, yliu@fridaylinux.org, jfreiman@redhat.com, mst@redhat.com,
    vkaplans@redhat.com, jasowang@redhat.com
Subject: Re: [PATCH 08/21] vhost: iotlb: add pending miss request list and helpers
Date: Tue, 5 Sep 2017 17:18:37 +0200
Message-ID: <586f1ce1-d77a-e0ba-5c59-b698bcef4a41@redhat.com>
In-Reply-To: <20170905071106.GA22515@debian-ZGViaWFuCg>

On 09/05/2017 09:11 AM, Tiwei Bie wrote:
> On Thu, Aug 31, 2017 at 11:50:10AM +0200, Maxime Coquelin wrote:
>> In order to be able to handle other ports or queues while waiting
>> for an IOTLB miss reply, a pending list is created so that the
>> waiter can return and retry later by sending the miss request again.
>>
>> Signed-off-by: Maxime Coquelin
>> ---
>>  lib/librte_vhost/iotlb.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++--
>>  lib/librte_vhost/iotlb.h |  4 +++
>>  lib/librte_vhost/vhost.h |  1 +
>>  3 files changed, 91 insertions(+), 2 deletions(-)
>>
>> diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
>> index 1b739dae5..d014bfe98 100644
>> --- a/lib/librte_vhost/iotlb.c
>> +++ b/lib/librte_vhost/iotlb.c
>> @@ -49,7 +49,86 @@ struct vhost_iotlb_entry {
>>  	uint8_t perm;
>>  };
>>
>> -#define IOTLB_CACHE_SIZE 1024
>> +#define IOTLB_CACHE_SIZE 2048
>> +
>> +static void vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
>> +{
>> +	struct vhost_iotlb_entry *node, *temp_node;
>> +
>> +	rte_rwlock_write_lock(&vq->iotlb_lock);
>> +
>> +	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
>> +		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
>> +		rte_mempool_put(vq->iotlb_pool, node);
>> +	}
>> +
>> +	rte_rwlock_write_unlock(&vq->iotlb_lock);
>> +}
>> +
>> +int vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
>> +				uint8_t perm)
>> +{
>> +	struct vhost_iotlb_entry *node;
>> +	int found = 0;
>> +
>
> The return value of this function is boolean, so it's better
> to return bool instead of int.

Fixed.

>> +	rte_rwlock_read_lock(&vq->iotlb_lock);
>> +
>> +	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
>> +		if ((node->iova == iova) && (node->perm == perm)) {
>> +			found = 1;
>> +			break;
>> +		}
>> +	}
>> +
>> +	rte_rwlock_read_unlock(&vq->iotlb_lock);
>> +
>> +	return found;
>> +}
>> +
>> +void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
>> +				uint64_t iova, uint8_t perm)
>> +{
>> +	struct vhost_iotlb_entry *node;
>> +	int ret;
>> +
>> +	ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
>> +	if (ret) {
>> +		RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");
>
> I think the log level should be INFO or the like, not ERR.

Fixed.
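To be concrete, here is roughly what I have in mind for v2 on both points,
i.e. a bool return type and a lower log level (a sketch only, the actual v2
patch may differ slightly; bool requires including <stdbool.h>):

bool
vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
			uint8_t perm)
{
	struct vhost_iotlb_entry *node;
	bool found = false;

	rte_rwlock_read_lock(&vq->iotlb_lock);

	TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
		if ((node->iova == iova) && (node->perm == perm)) {
			found = true;
			break;
		}
	}

	rte_rwlock_read_unlock(&vq->iotlb_lock);

	return found;
}

and for the mempool exhaustion message, same text but demoted:

	RTE_LOG(INFO, VHOST_CONFIG, "IOTLB pool empty, invalidate cache\n");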
>
>> +		vhost_user_iotlb_pending_remove_all(vq);
>> +		ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
>> +		if (ret) {
>> +			RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
>> +			return;
>> +		}
>> +	}
>> +
>> +	node->iova = iova;
>> +	node->perm = perm;
>> +
>> +	rte_rwlock_write_lock(&vq->iotlb_lock);
>> +
>> +	TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
>> +
>> +	rte_rwlock_write_unlock(&vq->iotlb_lock);
>> +}
>> +
>> +static void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
>> +				uint64_t iova, uint64_t size, uint8_t perm)
>> +{
>> +	struct vhost_iotlb_entry *node, *temp_node;
>> +
>> +	/* .iotlb_lock already locked by the caller */
>> +	TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
>> +		if (node->iova < iova)
>> +			continue;
>> +		if (node->iova >= iova + size)
>> +			continue;
>> +		if ((node->perm & perm) != node->perm)
>> +			continue;
>> +		TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
>> +		rte_mempool_put(vq->iotlb_pool, node);
>> +	}
>> +}
>>
>>  static void vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
>>  {
>> @@ -106,7 +185,10 @@ void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
>>  	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
>>
>>  unlock:
>> +	vhost_user_iotlb_pending_remove(vq, iova, size, perm);
>> +
>>  	rte_rwlock_write_unlock(&vq->iotlb_lock);
>> +
>
> This empty line should be removed.

Yes, this part disappears in the next version, as I squashed patch 21 into
patches 7 & 8.

Thanks,
Maxime

> Best regards,
> Tiwei Bie
>
>> }
>>
>> void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
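P.S. For those following the thread, here is roughly how these helpers fit
together in the translation path (an illustrative sketch only: the
iotlb_translate() name is made up for this example, and the cache lookup and
miss request helpers come from other patches of this series, so the exact
signatures may differ):

static uint64_t
iotlb_translate(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t size, uint8_t perm)
{
	uint64_t vva;

	/* Fast path: the translation is already in the IOTLB cache. */
	vva = vhost_user_iotlb_cache_find(vq, iova, &size, perm);
	if (vva)
		return vva;

	/*
	 * Slow path: send a miss request, but only if none is already
	 * pending for this IOVA/perm, so that the master is not flooded
	 * with duplicate requests while the reply is in flight.
	 */
	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		vhost_user_iotlb_pending_insert(vq, iova, perm);
		vhost_user_iotlb_miss(dev, iova, perm);
	}

	/*
	 * The caller backs off and retries once the IOTLB update arrives;
	 * the update handler calls vhost_user_iotlb_cache_insert(), which
	 * also drops the now-satisfied entries from the pending list.
	 */
	return 0;
}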