From mboxrd@z Thu Jan  1 00:00:00 1970
From: Andrew Rybchenko <arybchenko@solarflare.com>
Subject: [RFC v2 16/17] mempool/bucket: implement block dequeue operation
Date: Tue, 23 Jan 2018 13:16:11 +0000
Message-ID: <1516713372-10572-17-git-send-email-arybchenko@solarflare.com>
References: <1511539591-20966-1-git-send-email-arybchenko@solarflare.com>
 <1516713372-10572-1-git-send-email-arybchenko@solarflare.com>
In-Reply-To: <1516713372-10572-1-git-send-email-arybchenko@solarflare.com>
Mime-Version: 1.0
Content-Type: text/plain
To: dev@dpdk.org
Cc: Olivier Matz, "Artem V. Andreev"

From: "Artem V. Andreev"

Signed-off-by: Artem V. Andreev
Signed-off-by: Andrew Rybchenko
---
 drivers/mempool/bucket/rte_mempool_bucket.c | 52 +++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
index dc4e1dc..03fccf1 100644
--- a/drivers/mempool/bucket/rte_mempool_bucket.c
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -294,6 +294,46 @@ bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
 	return rc;
 }
 
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+			     unsigned int n)
+{
+	struct bucket_data *bd = mp->pool_data;
+	const uint32_t header_size = bd->header_size;
+	struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+	unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+	struct bucket_header *hdr;
+	void **first_objp = first_obj_table;
+
+	bucket_adopt_orphans(bd);
+
+	n -= n_buckets_from_stack;
+	while (n_buckets_from_stack-- > 0) {
+		hdr = bucket_stack_pop_unsafe(cur_stack);
+		*first_objp++ = (uint8_t *)hdr + header_size;
+	}
+	if (n > 0) {
+		if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+						   first_objp, n, NULL) != n)) {
+			/* Return the already dequeued buckets */
+			while (first_objp-- != first_obj_table) {
+				bucket_stack_push(cur_stack,
+						  (uint8_t *)*first_objp -
+						  header_size);
+			}
+			rte_errno = ENOBUFS;
+			return -rte_errno;
+		}
+		while (n-- > 0) {
+			hdr = (struct bucket_header *)*first_objp;
+			hdr->lcore_id = rte_lcore_id();
+			*first_objp++ = (uint8_t *)hdr + header_size;
+		}
+	}
+
+	return 0;
+}
+
 static void
 count_underfilled_buckets(struct rte_mempool *mp,
 			  void *opaque,
@@ -546,6 +586,16 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
 	return n_objs;
 }
 
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+	struct bucket_data *bd = mp->pool_data;
+
+	info->contig_block_size = bd->obj_per_bucket;
+	return 0;
+}
+
+
 static const struct rte_mempool_ops ops_bucket = {
 	.name = "bucket",
 	.alloc = bucket_alloc,
@@ -555,6 +605,8 @@ static const struct rte_mempool_ops ops_bucket = {
 	.get_count = bucket_get_count,
 	.calc_mem_size = bucket_calc_mem_size,
 	.populate = bucket_populate,
+	.get_info = bucket_get_info,
+	.dequeue_contig_blocks = bucket_dequeue_contig_blocks,
 };
-- 
2.7.4
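
[Editor's sketch, not part of the patch] A minimal example of how the two new
ops are reached through the generic mempool API, assuming the
rte_mempool_get_contig_blocks() and rte_mempool_ops_get_info() helpers
introduced earlier in this series are available; the helper function name and
the object-stride arithmetic below are illustrative only.

#include <rte_mempool.h>
#include <rte_errno.h>

/* Illustrative helper: dequeue one contiguous block from a pool using the
 * "bucket" ops, touch its objects, then give them back one by one.
 */
static int
use_one_contig_block(struct rte_mempool *mp)
{
	struct rte_mempool_info info;
	size_t obj_stride;
	void *first_obj;
	unsigned int i;
	int rc;

	/* contig_block_size is reported by bucket_get_info() above */
	rc = rte_mempool_ops_get_info(mp, &info);
	if (rc != 0)
		return rc;

	/* This ends up in bucket_dequeue_contig_blocks() */
	if (rte_mempool_get_contig_blocks(mp, &first_obj, 1) != 0)
		return -rte_errno;

	/* Assumption: objects inside a block are laid out back to back,
	 * so the stride is the full per-object footprint.
	 */
	obj_stride = mp->header_size + mp->elt_size + mp->trailer_size;

	for (i = 0; i < info.contig_block_size; i++) {
		void *obj = (uint8_t *)first_obj + i * obj_stride;

		/* ... use obj ... */

		/* There is no "put block" operation; objects go back
		 * through the normal per-object enqueue path.
		 */
		rte_mempool_put(mp, obj);
	}

	return 0;
}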