* [PATCH v1] ION: Sys_heap: Add cached pool to speed up cached buffer alloc
@ 2016-05-09 11:35 Chen Feng
2016-05-10 22:48 ` Laura Abbott
0 siblings, 1 reply; 3+ messages in thread
From: Chen Feng @ 2016-05-09 11:35 UTC (permalink / raw)
To: puck.chen, saberlily.xia, gregkh, arve, riandrews, labbott,
paul.gortmaker, bmarsh94, devel, linux-kernel
Cc: xuyiping, suzhuangluan, dan.zhao, oliver.fu, linuxarm, puck.chen
Add an ION cached pool in the system heap. This patch adds a cached
pool to the system heap, which greatly improves allocation performance
for cached buffers.
v1: Makes the cached buffer zeroed before going to pool
Signed-off-by: Chen Feng <puck.chen@hisilicon.com>
Signed-off-by: Xia Qing <saberlily.xia@hisilicon.com>
Reviewed-by: Fu Jun <oliver.fu@hisilicon.com>
---
drivers/staging/android/ion/ion_system_heap.c | 155 +++++++++++++++++---------
1 file changed, 103 insertions(+), 52 deletions(-)
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index b69dfc7..4b14a0b 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -49,47 +49,55 @@ static inline unsigned int order_to_size(int order)
struct ion_system_heap {
struct ion_heap heap;
- struct ion_page_pool *pools[0];
+ struct ion_page_pool *uncached_pools[0];
+ struct ion_page_pool *cached_pools[0];
};
+/**
+ * The page from page-pool are all zeroed before. We need do cache
+ * clean for cached buffer. The uncached buffer are always non-cached
+ * since it's allocated. So no need for non-cached pages.
+ */
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order)
{
bool cached = ion_buffer_cached(buffer);
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct ion_page_pool *pool;
struct page *page;
- if (!cached) {
- page = ion_page_pool_alloc(pool);
- } else {
- gfp_t gfp_flags = low_order_gfp_flags;
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
- if (order > 4)
- gfp_flags = high_order_gfp_flags;
- page = alloc_pages(gfp_flags | __GFP_COMP, order);
- if (!page)
- return NULL;
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
- }
+ page = ion_page_pool_alloc(pool);
+ if (cached)
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
return page;
}
static void free_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer, struct page *page)
{
+ struct ion_page_pool *pool;
unsigned int order = compound_order(page);
bool cached = ion_buffer_cached(buffer);
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
- struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
- ion_page_pool_free(pool, page);
- } else {
+ /* go to system */
+ if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
__free_pages(page, order);
+ return;
}
+
+ if (!cached)
+ pool = heap->uncached_pools[order_to_index(order)];
+ else
+ pool = heap->cached_pools[order_to_index(order)];
+
+ ion_page_pool_free(pool, page);
}
@@ -181,16 +189,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
- bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
int i;
- /*
- * uncached pages come from the page pools, zero them before returning
- * for security purposes (other allocations are zerod at
- * alloc time
- */
- if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+ /* zero the buffer before goto page pool */
+ if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
@@ -224,19 +227,29 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
only_scan = 1;
for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
-
- nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
- nr_total += nr_freed;
+ struct ion_page_pool *pool = sys_heap->uncached_pools[i];
if (!only_scan) {
+ nr_freed = ion_page_pool_shrink(pool, gfp_mask,
+ nr_to_scan);
nr_to_scan -= nr_freed;
- /* shrink completed */
+ nr_total += nr_freed;
+ pool = sys_heap->cached_pools[i];
+ nr_freed = ion_page_pool_shrink(pool, gfp_mask,
+ nr_to_scan);
+ nr_to_scan -= nr_freed;
+ nr_total += nr_freed;
if (nr_to_scan <= 0)
break;
+ } else {
+ nr_freed = ion_page_pool_shrink(pool, gfp_mask,
+ nr_to_scan);
+ nr_total += nr_freed;
+ nr_freed = ion_page_pool_shrink(pool, gfp_mask,
+ nr_to_scan);
+ nr_total += nr_freed;
}
}
-
return nr_total;
}
@@ -259,27 +272,69 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
struct ion_system_heap,
heap);
int i;
+ struct ion_page_pool *pool;
for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool = sys_heap->pools[i];
+ pool = sys_heap->uncached_pools[i];
- seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
pool->high_count, pool->order,
(PAGE_SIZE << pool->order) * pool->high_count);
- seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
pool->low_count, pool->order,
(PAGE_SIZE << pool->order) * pool->low_count);
}
+
+ for (i = 0; i < num_orders; i++) {
+ pool = sys_heap->cached_pools[i];
+
+ seq_printf(s, "%d order %u highmem pages cached %lu total\n",
+ pool->high_count, pool->order,
+ (PAGE_SIZE << pool->order) * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
+ pool->low_count, pool->order,
+ (PAGE_SIZE << pool->order) * pool->low_count);
+ }
+ return 0;
+}
+
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ if (pools[i])
+ ion_page_pool_destroy(pools[i]);
+}
+
+static int ion_system_heap_create_pools(struct ion_page_pool **pools)
+{
+ int i;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ pools[i] = pool;
+ }
return 0;
+err_create_pool:
+ ion_system_heap_destroy_pools(pools);
+ return -ENOMEM;
}
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
- int i;
heap = kzalloc(sizeof(struct ion_system_heap) +
- sizeof(struct ion_page_pool *) * num_orders,
+ sizeof(struct ion_page_pool *) * num_orders * 2,
GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
@@ -287,24 +342,18 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- for (i = 0; i < num_orders; i++) {
- struct ion_page_pool *pool;
- gfp_t gfp_flags = low_order_gfp_flags;
+ if (ion_system_heap_create_pools(heap->uncached_pools))
+ goto free_heap;
- if (orders[i] > 4)
- gfp_flags = high_order_gfp_flags;
- pool = ion_page_pool_create(gfp_flags, orders[i]);
- if (!pool)
- goto destroy_pools;
- heap->pools[i] = pool;
- }
+ if (ion_system_heap_create_pools(heap->cached_pools))
+ goto destroy_uncached_pools;
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
-destroy_pools:
- while (i--)
- ion_page_pool_destroy(heap->pools[i]);
+destroy_uncached_pools:
+ ion_system_heap_destroy_pools(heap->uncached_pools);
+free_heap:
kfree(heap);
return ERR_PTR(-ENOMEM);
}
@@ -316,8 +365,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
heap);
int i;
- for (i = 0; i < num_orders; i++)
- ion_page_pool_destroy(sys_heap->pools[i]);
+ for (i = 0; i < num_orders; i++) {
+ ion_page_pool_destroy(sys_heap->uncached_pools[i]);
+ ion_page_pool_destroy(sys_heap->cached_pools[i]);
+ }
kfree(sys_heap);
}
--
1.9.1
^ permalink raw reply related [flat|nested] 3+ messages in thread* Re: [PATCH v1] ION: Sys_heap: Add cached pool to speed up cached buffer alloc
2016-05-09 11:35 [PATCH v1] ION: Sys_heap: Add cached pool to spead up cached buffer alloc Chen Feng
@ 2016-05-10 22:48 ` Laura Abbott
2016-05-11 2:26 ` Chen Feng
0 siblings, 1 reply; 3+ messages in thread
From: Laura Abbott @ 2016-05-10 22:48 UTC (permalink / raw)
To: Chen Feng, saberlily.xia, gregkh, arve, riandrews, paul.gortmaker,
bmarsh94, devel, linux-kernel
Cc: xuyiping, suzhuangluan, dan.zhao, oliver.fu, linuxarm, puck.chen
On 05/09/2016 04:35 AM, Chen Feng wrote:
> Add ion cached pool in system heap. This patch add a cached pool
> in system heap. It has a great improvement of alloc for cached
> buffer.
>
Can you give some benchmark numbers here?
> v1: Makes the cached buffer zeroed before going to pool
>
> Signed-off-by: Chen Feng <puck.chen@hisilicon.com>
> Signed-off-by: Xia Qing <saberlily.xia@hisilicon.com>
> Reviewed-by: Fu Jun <oliver.fu@hisilicon.com>
> ---
> drivers/staging/android/ion/ion_system_heap.c | 155 +++++++++++++++++---------
> 1 file changed, 103 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
> index b69dfc7..4b14a0b 100644
> --- a/drivers/staging/android/ion/ion_system_heap.c
> +++ b/drivers/staging/android/ion/ion_system_heap.c
> @@ -49,47 +49,55 @@ static inline unsigned int order_to_size(int order)
>
> struct ion_system_heap {
> struct ion_heap heap;
> - struct ion_page_pool *pools[0];
> + struct ion_page_pool *uncached_pools[0];
> + struct ion_page_pool *cached_pools[0];
> };
>
I don't think this is correct based on https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
and some discussions with others. Their tests showed this may just result in the
two struct members aliasing, which is not what we want.
The flexible array isn't really necessary here anyway. The number of orders we want
is known at compile time and
-static const int num_orders = ARRAY_SIZE(orders);
+
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
should let the NUM_ORDERS be used as a constant initializer in the struct.
> +/**
> + * The page from page-pool are all zeroed before. We need do cache
> + * clean for cached buffer. The uncached buffer are always non-cached
> + * since it's allocated. So no need for non-cached pages.
> + */
> static struct page *alloc_buffer_page(struct ion_system_heap *heap,
> struct ion_buffer *buffer,
> unsigned long order)
> {
> bool cached = ion_buffer_cached(buffer);
> - struct ion_page_pool *pool = heap->pools[order_to_index(order)];
> + struct ion_page_pool *pool;
> struct page *page;
>
> - if (!cached) {
> - page = ion_page_pool_alloc(pool);
> - } else {
> - gfp_t gfp_flags = low_order_gfp_flags;
> + if (!cached)
> + pool = heap->uncached_pools[order_to_index(order)];
> + else
> + pool = heap->cached_pools[order_to_index(order)];
>
> - if (order > 4)
> - gfp_flags = high_order_gfp_flags;
> - page = alloc_pages(gfp_flags | __GFP_COMP, order);
> - if (!page)
> - return NULL;
> - ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
> - DMA_BIDIRECTIONAL);
> - }
> + page = ion_page_pool_alloc(pool);
>
> + if (cached)
> + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
> + DMA_BIDIRECTIONAL);
> return page;
> }
>
This is doing an extra sync for newly allocated pages. If the buffer was
just allocated the sync should be skipped.
> static void free_buffer_page(struct ion_system_heap *heap,
> struct ion_buffer *buffer, struct page *page)
> {
> + struct ion_page_pool *pool;
> unsigned int order = compound_order(page);
> bool cached = ion_buffer_cached(buffer);
>
> - if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
> - struct ion_page_pool *pool = heap->pools[order_to_index(order)];
> -
> - ion_page_pool_free(pool, page);
> - } else {
> + /* go to system */
> + if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
> __free_pages(page, order);
> + return;
> }
> +
> + if (!cached)
> + pool = heap->uncached_pools[order_to_index(order)];
> + else
> + pool = heap->cached_pools[order_to_index(order)];
> +
> + ion_page_pool_free(pool, page);
> }
>
>
> @@ -181,16 +189,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
> struct ion_system_heap,
> heap);
> struct sg_table *table = buffer->sg_table;
> - bool cached = ion_buffer_cached(buffer);
> struct scatterlist *sg;
> int i;
>
> - /*
> - * uncached pages come from the page pools, zero them before returning
> - * for security purposes (other allocations are zerod at
> - * alloc time
> - */
> - if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
> + /* zero the buffer before goto page pool */
> + if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
> ion_heap_buffer_zero(buffer);
>
> for_each_sg(table->sgl, sg, table->nents, i)
> @@ -224,19 +227,29 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
> only_scan = 1;
>
> for (i = 0; i < num_orders; i++) {
> - struct ion_page_pool *pool = sys_heap->pools[i];
> -
> - nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
> - nr_total += nr_freed;
> + struct ion_page_pool *pool = sys_heap->uncached_pools[i];
>
> if (!only_scan) {
> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> + nr_to_scan);
> nr_to_scan -= nr_freed;
> - /* shrink completed */
> + nr_total += nr_freed;
> + pool = sys_heap->cached_pools[i];
> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> + nr_to_scan);
> + nr_to_scan -= nr_freed;
> + nr_total += nr_freed;
> if (nr_to_scan <= 0)
> break;
> + } else {
> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> + nr_to_scan);
> + nr_total += nr_freed;
> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
> + nr_to_scan);
> + nr_total += nr_freed;
> }
> }
> -
> return nr_total;
> }
>
> @@ -259,27 +272,69 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
> struct ion_system_heap,
> heap);
> int i;
> + struct ion_page_pool *pool;
>
> for (i = 0; i < num_orders; i++) {
> - struct ion_page_pool *pool = sys_heap->pools[i];
> + pool = sys_heap->uncached_pools[i];
>
> - seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
> + seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
> pool->high_count, pool->order,
> (PAGE_SIZE << pool->order) * pool->high_count);
> - seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
> + seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
> pool->low_count, pool->order,
> (PAGE_SIZE << pool->order) * pool->low_count);
> }
> +
> + for (i = 0; i < num_orders; i++) {
> + pool = sys_heap->cached_pools[i];
> +
> + seq_printf(s, "%d order %u highmem pages cached %lu total\n",
> + pool->high_count, pool->order,
> + (PAGE_SIZE << pool->order) * pool->high_count);
> + seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
> + pool->low_count, pool->order,
> + (PAGE_SIZE << pool->order) * pool->low_count);
> + }
> + return 0;
> +}
> +
> +static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
> +{
> + int i;
> +
> + for (i = 0; i < num_orders; i++)
> + if (pools[i])
> + ion_page_pool_destroy(pools[i]);
> +}
> +
> +static int ion_system_heap_create_pools(struct ion_page_pool **pools)
> +{
> + int i;
> + gfp_t gfp_flags = low_order_gfp_flags;
> +
> + for (i = 0; i < num_orders; i++) {
> + struct ion_page_pool *pool;
> +
> + if (orders[i] > 4)
> + gfp_flags = high_order_gfp_flags;
> +
> + pool = ion_page_pool_create(gfp_flags, orders[i]);
> + if (!pool)
> + goto err_create_pool;
> + pools[i] = pool;
> + }
> return 0;
> +err_create_pool:
> + ion_system_heap_destroy_pools(pools);
> + return -ENOMEM;
> }
>
> struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
> {
> struct ion_system_heap *heap;
> - int i;
>
> heap = kzalloc(sizeof(struct ion_system_heap) +
> - sizeof(struct ion_page_pool *) * num_orders,
> + sizeof(struct ion_page_pool *) * num_orders * 2,
> GFP_KERNEL);
> if (!heap)
> return ERR_PTR(-ENOMEM);
> @@ -287,24 +342,18 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
> heap->heap.type = ION_HEAP_TYPE_SYSTEM;
> heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
>
> - for (i = 0; i < num_orders; i++) {
> - struct ion_page_pool *pool;
> - gfp_t gfp_flags = low_order_gfp_flags;
> + if (ion_system_heap_create_pools(heap->uncached_pools))
> + goto free_heap;
>
> - if (orders[i] > 4)
> - gfp_flags = high_order_gfp_flags;
> - pool = ion_page_pool_create(gfp_flags, orders[i]);
> - if (!pool)
> - goto destroy_pools;
> - heap->pools[i] = pool;
> - }
> + if (ion_system_heap_create_pools(heap->cached_pools))
> + goto destroy_uncached_pools;
>
> heap->heap.debug_show = ion_system_heap_debug_show;
> return &heap->heap;
>
> -destroy_pools:
> - while (i--)
> - ion_page_pool_destroy(heap->pools[i]);
> +destroy_uncached_pools:
> + ion_system_heap_destroy_pools(heap->uncached_pools);
> +free_heap:
> kfree(heap);
> return ERR_PTR(-ENOMEM);
> }
> @@ -316,8 +365,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
> heap);
> int i;
>
> - for (i = 0; i < num_orders; i++)
> - ion_page_pool_destroy(sys_heap->pools[i]);
> + for (i = 0; i < num_orders; i++) {
> + ion_page_pool_destroy(sys_heap->uncached_pools[i]);
> + ion_page_pool_destroy(sys_heap->cached_pools[i]);
> + }
> kfree(sys_heap);
> }
>
>
Thanks,
Laura
^ permalink raw reply [flat|nested] 3+ messages in thread* Re: [PATCH v1] ION: Sys_heap: Add cached pool to speed up cached buffer alloc
2016-05-10 22:48 ` Laura Abbott
@ 2016-05-11 2:26 ` Chen Feng
0 siblings, 0 replies; 3+ messages in thread
From: Chen Feng @ 2016-05-11 2:26 UTC (permalink / raw)
To: Laura Abbott, saberlily.xia, gregkh, arve, riandrews,
paul.gortmaker, bmarsh94, devel, linux-kernel
Cc: xuyiping, suzhuangluan, dan.zhao, oliver.fu, linuxarm, puck.chen
On 2016/5/11 6:48, Laura Abbott wrote:
> On 05/09/2016 04:35 AM, Chen Feng wrote:
>> Add ion cached pool in system heap. This patch add a cached pool
>> in system heap. It has a great improvement of alloc for cached
>> buffer.
>>
>
> Can you give some benchmark numbers here?
>
OK, my test uses the iontest userspace tool.
I will share the results in the next version.
By the way, which benchmark do you prefer for ION testing?
>> v1: Makes the cached buffer zeroed before going to pool
>>
>> Signed-off-by: Chen Feng <puck.chen@hisilicon.com>
>> Signed-off-by: Xia Qing <saberlily.xia@hisilicon.com>
>> Reviewed-by: Fu Jun <oliver.fu@hisilicon.com>
>> ---
>> drivers/staging/android/ion/ion_system_heap.c | 155 +++++++++++++++++---------
>> 1 file changed, 103 insertions(+), 52 deletions(-)
>>
>> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
>> index b69dfc7..4b14a0b 100644
>> --- a/drivers/staging/android/ion/ion_system_heap.c
>> +++ b/drivers/staging/android/ion/ion_system_heap.c
>> @@ -49,47 +49,55 @@ static inline unsigned int order_to_size(int order)
>>
>> struct ion_system_heap {
>> struct ion_heap heap;
>> - struct ion_page_pool *pools[0];
>> + struct ion_page_pool *uncached_pools[0];
>> + struct ion_page_pool *cached_pools[0];
>> };
>>
>
>
> I don't think this is correct based on https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
> and some discussions with others. Their tests showed this may just result in the
> two struct members aliasing, which is not what we want.
>
> The flexible array isn't really necessary here anyway. The number of orders we want
> is known at compile time and
>
Yes, I will change this later.
> -static const int num_orders = ARRAY_SIZE(orders);
> +
> +#define NUM_ORDERS ARRAY_SIZE(orders)
> +
>
> should let the NUM_ORDERS be used as a constant initializer in the struct.
>
ok.
>> +/**
>> + * The page from page-pool are all zeroed before. We need do cache
>> + * clean for cached buffer. The uncached buffer are always non-cached
>> + * since it's allocated. So no need for non-cached pages.
>> + */
>> static struct page *alloc_buffer_page(struct ion_system_heap *heap,
>> struct ion_buffer *buffer,
>> unsigned long order)
>> {
>> bool cached = ion_buffer_cached(buffer);
>> - struct ion_page_pool *pool = heap->pools[order_to_index(order)];
>> + struct ion_page_pool *pool;
>> struct page *page;
>>
>> - if (!cached) {
>> - page = ion_page_pool_alloc(pool);
>> - } else {
>> - gfp_t gfp_flags = low_order_gfp_flags;
>> + if (!cached)
>> + pool = heap->uncached_pools[order_to_index(order)];
>> + else
>> + pool = heap->cached_pools[order_to_index(order)];
>>
>> - if (order > 4)
>> - gfp_flags = high_order_gfp_flags;
>> - page = alloc_pages(gfp_flags | __GFP_COMP, order);
>> - if (!page)
>> - return NULL;
>> - ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
>> - DMA_BIDIRECTIONAL);
>> - }
>> + page = ion_page_pool_alloc(pool);
>>
>> + if (cached)
>> + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
>> + DMA_BIDIRECTIONAL);
>> return page;
>> }
>>
>
> This is doing an extra sync for newly allocated pages. If the buffer was
> just allocated the sync should be skipped.
>
Yes, to avoid this extra sync for newly allocated pages,
I will add a new parameter to the ION page pool to distinguish
whether it is cached or not.
>> static void free_buffer_page(struct ion_system_heap *heap,
>> struct ion_buffer *buffer, struct page *page)
>> {
>> + struct ion_page_pool *pool;
>> unsigned int order = compound_order(page);
>> bool cached = ion_buffer_cached(buffer);
>>
>> - if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
>> - struct ion_page_pool *pool = heap->pools[order_to_index(order)];
>> -
>> - ion_page_pool_free(pool, page);
>> - } else {
>> + /* go to system */
>> + if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
>> __free_pages(page, order);
>> + return;
>> }
>> +
>> + if (!cached)
>> + pool = heap->uncached_pools[order_to_index(order)];
>> + else
>> + pool = heap->cached_pools[order_to_index(order)];
>> +
>> + ion_page_pool_free(pool, page);
>> }
>>
>>
>> @@ -181,16 +189,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
>> struct ion_system_heap,
>> heap);
>> struct sg_table *table = buffer->sg_table;
>> - bool cached = ion_buffer_cached(buffer);
>> struct scatterlist *sg;
>> int i;
>>
>> - /*
>> - * uncached pages come from the page pools, zero them before returning
>> - * for security purposes (other allocations are zerod at
>> - * alloc time
>> - */
>> - if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
>> + /* zero the buffer before goto page pool */
>> + if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
>> ion_heap_buffer_zero(buffer);
>>
>> for_each_sg(table->sgl, sg, table->nents, i)
>> @@ -224,19 +227,29 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
>> only_scan = 1;
>>
>> for (i = 0; i < num_orders; i++) {
>> - struct ion_page_pool *pool = sys_heap->pools[i];
>> -
>> - nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
>> - nr_total += nr_freed;
>> + struct ion_page_pool *pool = sys_heap->uncached_pools[i];
>>
>> if (!only_scan) {
>> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
>> + nr_to_scan);
>> nr_to_scan -= nr_freed;
>> - /* shrink completed */
>> + nr_total += nr_freed;
>> + pool = sys_heap->cached_pools[i];
There are some bugs here; I will also fix them in the next version.
>> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
>> + nr_to_scan);
>> + nr_to_scan -= nr_freed;
>> + nr_total += nr_freed;
>> if (nr_to_scan <= 0)
>> break;
>> + } else {
>> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
>> + nr_to_scan);
>> + nr_total += nr_freed;
>> + nr_freed = ion_page_pool_shrink(pool, gfp_mask,
>> + nr_to_scan);
>> + nr_total += nr_freed;
>> }
>> }
>> -
>> return nr_total;
>> }
>>
>> @@ -259,27 +272,69 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
>> struct ion_system_heap,
>> heap);
>> int i;
>> + struct ion_page_pool *pool;
>>
>> for (i = 0; i < num_orders; i++) {
>> - struct ion_page_pool *pool = sys_heap->pools[i];
>> + pool = sys_heap->uncached_pools[i];
>>
>> - seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
>> + seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
>> pool->high_count, pool->order,
>> (PAGE_SIZE << pool->order) * pool->high_count);
>> - seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
>> + seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
>> pool->low_count, pool->order,
>> (PAGE_SIZE << pool->order) * pool->low_count);
>> }
>> +
>> + for (i = 0; i < num_orders; i++) {
>> + pool = sys_heap->cached_pools[i];
>> +
>> + seq_printf(s, "%d order %u highmem pages cached %lu total\n",
>> + pool->high_count, pool->order,
>> + (PAGE_SIZE << pool->order) * pool->high_count);
>> + seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
>> + pool->low_count, pool->order,
>> + (PAGE_SIZE << pool->order) * pool->low_count);
>> + }
>> + return 0;
>> +}
>> +
>> +static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < num_orders; i++)
>> + if (pools[i])
>> + ion_page_pool_destroy(pools[i]);
>> +}
>> +
>> +static int ion_system_heap_create_pools(struct ion_page_pool **pools)
>> +{
>> + int i;
>> + gfp_t gfp_flags = low_order_gfp_flags;
>> +
>> + for (i = 0; i < num_orders; i++) {
>> + struct ion_page_pool *pool;
>> +
>> + if (orders[i] > 4)
>> + gfp_flags = high_order_gfp_flags;
>> +
>> + pool = ion_page_pool_create(gfp_flags, orders[i]);
>> + if (!pool)
>> + goto err_create_pool;
>> + pools[i] = pool;
>> + }
>> return 0;
>> +err_create_pool:
>> + ion_system_heap_destroy_pools(pools);
>> + return -ENOMEM;
>> }
>>
>> struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
>> {
>> struct ion_system_heap *heap;
>> - int i;
>>
>> heap = kzalloc(sizeof(struct ion_system_heap) +
>> - sizeof(struct ion_page_pool *) * num_orders,
>> + sizeof(struct ion_page_pool *) * num_orders * 2,
>> GFP_KERNEL);
>> if (!heap)
>> return ERR_PTR(-ENOMEM);
>> @@ -287,24 +342,18 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
>> heap->heap.type = ION_HEAP_TYPE_SYSTEM;
>> heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
>>
>> - for (i = 0; i < num_orders; i++) {
>> - struct ion_page_pool *pool;
>> - gfp_t gfp_flags = low_order_gfp_flags;
>> + if (ion_system_heap_create_pools(heap->uncached_pools))
>> + goto free_heap;
>>
>> - if (orders[i] > 4)
>> - gfp_flags = high_order_gfp_flags;
>> - pool = ion_page_pool_create(gfp_flags, orders[i]);
>> - if (!pool)
>> - goto destroy_pools;
>> - heap->pools[i] = pool;
>> - }
>> + if (ion_system_heap_create_pools(heap->cached_pools))
>> + goto destroy_uncached_pools;
>>
>> heap->heap.debug_show = ion_system_heap_debug_show;
>> return &heap->heap;
>>
>> -destroy_pools:
>> - while (i--)
>> - ion_page_pool_destroy(heap->pools[i]);
>> +destroy_uncached_pools:
>> + ion_system_heap_destroy_pools(heap->uncached_pools);
>> +free_heap:
>> kfree(heap);
>> return ERR_PTR(-ENOMEM);
>> }
>> @@ -316,8 +365,10 @@ void ion_system_heap_destroy(struct ion_heap *heap)
>> heap);
>> int i;
>>
>> - for (i = 0; i < num_orders; i++)
>> - ion_page_pool_destroy(sys_heap->pools[i]);
>> + for (i = 0; i < num_orders; i++) {
>> + ion_page_pool_destroy(sys_heap->uncached_pools[i]);
>> + ion_page_pool_destroy(sys_heap->cached_pools[i]);
>> + }
>> kfree(sys_heap);
>> }
>>
>>
>
> Thanks,
> Laura
>
>
> .
>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2016-05-11 2:33 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-05-09 11:35 [PATCH v1] ION: Sys_heap: Add cached pool to speed up cached buffer alloc Chen Feng
2016-05-10 22:48 ` Laura Abbott
2016-05-11 2:26 ` Chen Feng
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox