* [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask
@ 2017-11-10 4:55 Aneesh Kumar K.V
2017-11-10 9:44 ` Michael Ellerman
2017-11-24 9:46 ` Michael Ellerman
0 siblings, 2 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2017-11-10 4:55 UTC (permalink / raw)
To: benh, paulus, mpe, npiggin; +Cc: linuxppc-dev, Aneesh Kumar K.V
While computing the slice mask for a free area we need to make sure we
only search within the addr limit applicable to this mmap. We update
slb_addr_limit after an mmap request above 128TB, but a following mmap
request with a hint addr below 128TB should still limit its search to
below 128TB, i.e. we should not use slb_addr_limit to compute the slice
mask in this case. Instead, we should derive the high addr limit from
the mmap hint addr value.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
arch/powerpc/mm/slice.c | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 564fff06f5c1..23ec2c5e3b78 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
return !slice_area_is_free(mm, start, end - start);
}
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned long i;
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+ if (high_limit <= SLICE_LOW_TOP)
return;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices);
}
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned char *hpsizes;
int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
if (((lpsizes >> (i * 4)) & 0xf) == psize)
ret->low_slices |= 1u << i;
+ if (high_limit <= SLICE_LOW_TOP)
+ return;
+
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
struct slice_mask mask, struct slice_mask available)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+ /*
+ * Make sure we just do bit compare only to the max
+ * addr limit and not the full bit map size.
+ */
unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* First make up a "good" mask of slices that have the right size
* already
*/
- slice_mask_for_size(mm, psize, &good_mask);
+ slice_mask_for_size(mm, psize, &good_mask, high_limit);
slice_print_mask(" good_mask", good_mask);
/*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
#ifdef CONFIG_PPC_64K_PAGES
/* If we support combo pages, we can allow 64k pages in 4k slices */
if (psize == MMU_PAGE_64K) {
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
if (fixed)
slice_or_mask(&good_mask, &compat_mask);
}
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
return newaddr;
}
}
-
- /* We don't fit in the good mask, check what other slices are
+ /*
+ * We don't fit in the good mask, check what other slices are
* empty and thus can be converted
*/
- slice_mask_for_free(mm, &potential_mask);
+ slice_mask_for_free(mm, &potential_mask, high_limit);
slice_or_mask(&potential_mask, &good_mask);
slice_print_mask(" potential", potential_mask);
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
{
struct slice_mask mask, available;
unsigned int psize = mm->context.user_psize;
+ unsigned long high_limit = mm->context.slb_addr_limit;
if (radix_enabled())
return 0;
slice_range_to_mask(addr, len, &mask);
- slice_mask_for_size(mm, psize, &available);
+ slice_mask_for_size(mm, psize, &available, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
/* We need to account for 4k slices too */
if (psize == MMU_PAGE_64K) {
struct slice_mask compat_mask;
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
slice_or_mask(&available, &compat_mask);
}
#endif
--
2.13.6
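For context, the fix assumes the caller passes a high_limit derived from
the mmap hint rather than always using mm->context.slb_addr_limit. A
minimal sketch of that derivation at the top of slice_get_unmapped_area(),
assuming DEFAULT_MAP_WINDOW is the 128TB default window and TASK_SIZE the
full address-space maximum (a sketch of the logic of this era, not the
verbatim upstream hunk):

	unsigned long high_limit = DEFAULT_MAP_WINDOW;	/* 128TB */

	/*
	 * Search above the default window only when the hint itself
	 * (or a fixed mapping) reaches above it; a hint below 128TB
	 * keeps the search below 128TB even if an earlier mmap has
	 * already raised slb_addr_limit.
	 */
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;

	if (high_limit > mm->context.slb_addr_limit) {
		mm->context.slb_addr_limit = high_limit;
		on_each_cpu(slice_flush_segments, mm, 1);
	}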
* Re: [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask
2017-11-10 4:55 [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask Aneesh Kumar K.V
@ 2017-11-10 9:44 ` Michael Ellerman
2017-11-10 17:29 ` Aneesh Kumar K.V
2017-11-24 9:46 ` Michael Ellerman
1 sibling, 1 reply; 6+ messages in thread
From: Michael Ellerman @ 2017-11-10 9:44 UTC (permalink / raw)
To: Aneesh Kumar K.V, benh, paulus, npiggin; +Cc: linuxppc-dev, Aneesh Kumar K.V
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:
> While computing the slice mask for a free area we need to make sure we
> only search within the addr limit applicable to this mmap. We update
> slb_addr_limit after an mmap request above 128TB, but a following mmap
> request with a hint addr below 128TB should still limit its search to
> below 128TB, i.e. we should not use slb_addr_limit to compute the slice
> mask in this case. Instead, we should derive the high addr limit from
> the mmap hint addr value.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
> arch/powerpc/mm/slice.c | 34 ++++++++++++++++++++++------------
> 1 file changed, 22 insertions(+), 12 deletions(-)
How does this relate to the fixes Nick has sent?
cheers
* Re: [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask
2017-11-10 9:44 ` Michael Ellerman
@ 2017-11-10 17:29 ` Aneesh Kumar K.V
2017-11-11 9:09 ` Nicholas Piggin
0 siblings, 1 reply; 6+ messages in thread
From: Aneesh Kumar K.V @ 2017-11-10 17:29 UTC (permalink / raw)
To: Michael Ellerman, benh, paulus, npiggin; +Cc: linuxppc-dev
Michael Ellerman <mpe@ellerman.id.au> writes:
> "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:
>
>> While computing the slice mask for a free area we need to make sure we
>> only search within the addr limit applicable to this mmap. We update
>> slb_addr_limit after an mmap request above 128TB, but a following mmap
>> request with a hint addr below 128TB should still limit its search to
>> below 128TB, i.e. we should not use slb_addr_limit to compute the slice
>> mask in this case. Instead, we should derive the high addr limit from
>> the mmap hint addr value.
>>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
>> ---
>> arch/powerpc/mm/slice.c | 34 ++++++++++++++++++++++------------
>> 1 file changed, 22 insertions(+), 12 deletions(-)
>
> How does this relate to the fixes Nick has sent?
This patch is on top of the patch series sent by Nick. Without this
patch we will allocate memory across the 128TB boundary if hint_addr <
128TB but hint_addr + len goes beyond it. In order to recreate this
issue we will have to map the stack below; hence one won't hit the
error in the general case.
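For illustration, a minimal userspace sketch of that sequence (the hint
and length values are made up for the example, and whether the bad case
actually triggers depends on the address-space layout, e.g. where the
stack sits):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Raise slb_addr_limit with a hint at/above 128TB. */
		void *hi = mmap((void *)(1UL << 47), 1UL << 20,
				PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		/*
		 * Hint just below 128TB with a length that crosses the
		 * boundary. Without the fix the slice search can use the
		 * raised slb_addr_limit and return a range spanning 128TB.
		 */
		unsigned long hint = (1UL << 47) - (1UL << 24); /* 128TB - 16MB */
		void *lo = mmap((void *)hint, 1UL << 25,
				PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		printf("hi=%p lo=%p\n", hi, lo);
		return 0;
	}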
* Re: [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask
2017-11-10 17:29 ` Aneesh Kumar K.V
@ 2017-11-11 9:09 ` Nicholas Piggin
2017-11-11 13:46 ` Aneesh Kumar K.V
0 siblings, 1 reply; 6+ messages in thread
From: Nicholas Piggin @ 2017-11-11 9:09 UTC (permalink / raw)
To: Aneesh Kumar K.V; +Cc: Michael Ellerman, benh, paulus, linuxppc-dev
On Fri, 10 Nov 2017 22:59:57 +0530
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
> Michael Ellerman <mpe@ellerman.id.au> writes:
>
> > "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:
> >
> >> While computing the slice mask for a free area we need to make sure we
> >> only search within the addr limit applicable to this mmap. We update
> >> slb_addr_limit after an mmap request above 128TB, but a following mmap
> >> request with a hint addr below 128TB should still limit its search to
> >> below 128TB, i.e. we should not use slb_addr_limit to compute the slice
> >> mask in this case. Instead, we should derive the high addr limit from
> >> the mmap hint addr value.
> >>
> >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> >> ---
> >> arch/powerpc/mm/slice.c | 34 ++++++++++++++++++++++------------
> >> 1 file changed, 22 insertions(+), 12 deletions(-)
> >
> > How does this relate to the fixes Nick has sent?
>
> This patch is on top of the patch series sent by Nick. Without this
> patch we will allocate memory across the 128TB boundary if hint_addr <
> 128TB but hint_addr + len goes beyond it. In order to recreate this
> issue we will have to map the stack below; hence one won't hit the
> error in the general case.
I couldn't get this case to trigger after that series -- hash
get_unmapped_area should be excluding it up front, before getting
into the slice allocator. Do you have an strace to reproduce it?
Either way I do think it would be good to tighten up all the slice
bitmap limits, including all the other places that hardcode the
max bitmap size.
Thanks,
Nick
* Re: [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask
2017-11-11 9:09 ` Nicholas Piggin
@ 2017-11-11 13:46 ` Aneesh Kumar K.V
0 siblings, 0 replies; 6+ messages in thread
From: Aneesh Kumar K.V @ 2017-11-11 13:46 UTC (permalink / raw)
To: Nicholas Piggin; +Cc: Michael Ellerman, benh, paulus, linuxppc-dev
Nicholas Piggin <npiggin@gmail.com> writes:
> On Fri, 10 Nov 2017 22:59:57 +0530
> "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
>
>> Michael Ellerman <mpe@ellerman.id.au> writes:
>>
>> > "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> writes:
>> >
>> >> While computing the slice mask for a free area we need to make sure we
>> >> only search within the addr limit applicable to this mmap. We update
>> >> slb_addr_limit after an mmap request above 128TB, but a following mmap
>> >> request with a hint addr below 128TB should still limit its search to
>> >> below 128TB, i.e. we should not use slb_addr_limit to compute the slice
>> >> mask in this case. Instead, we should derive the high addr limit from
>> >> the mmap hint addr value.
>> >>
>> >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
>> >> ---
>> >> arch/powerpc/mm/slice.c | 34 ++++++++++++++++++++++------------
>> >> 1 file changed, 22 insertions(+), 12 deletions(-)
>> >
>> > How does this relate to the fixes Nick has sent?
>>
>> This patch is on top of the patch series sent by Nick. Without this
>> patch we will allocate memory across the 128TB boundary if hint_addr <
>> 128TB but hint_addr + len goes beyond it. In order to recreate this
>> issue we will have to map the stack below; hence one won't hit the
>> error in the general case.
>
> I couldn't get this case to trigger after that series -- hash
> get_unmapped_area should be excluding it up front, before getting
> into the slice allocator. Do you have an strace to reproduce it?
That is correct. This change in slice_get_unmapped_area() prevents the
issue I mentioned above. I had that hunk reverted in order to check the
erroneous mask creation.
- if (addr > mm->task_size - len ||
+ if (addr > high_limit - len ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}
>
> Either way I do think it would be good to tighten up all the slice
> bitmap limits, including all the other places that hardcode the
> max bitmap size.
>
-aneesh
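As a sketch of the tightening suggested above, the bounded-compare
pattern from slice_check_fit() in the patch generalizes to the other
high-slice bitmap users: derive the slice count from the address limit
instead of hardcoding the maximum bitmap size (illustrative only, names
as in the patch):

	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
	unsigned long slice_count = GET_HIGH_SLICE_INDEX(high_limit);

	/* Compare only bits that can be valid below high_limit. */
	bitmap_and(result, mask.high_slices,
		   available.high_slices, slice_count);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
	       bitmap_equal(result, mask.high_slices, slice_count);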
* Re: powerpc/64s/slice: Use addr limit when computing slice mask
2017-11-10 4:55 [PATCH] powerpc/64s/slice: Use addr limit when computing slice mask Aneesh Kumar K.V
2017-11-10 9:44 ` Michael Ellerman
@ 2017-11-24 9:46 ` Michael Ellerman
1 sibling, 0 replies; 6+ messages in thread
From: Michael Ellerman @ 2017-11-24 9:46 UTC (permalink / raw)
To: Aneesh Kumar K.V, benh, paulus, npiggin; +Cc: linuxppc-dev, Aneesh Kumar K.V
On Fri, 2017-11-10 at 04:55:07 UTC, "Aneesh Kumar K.V" wrote:
> While computing the slice mask for a free area we need to make sure we
> only search within the addr limit applicable to this mmap. We update
> slb_addr_limit after an mmap request above 128TB, but a following mmap
> request with a hint addr below 128TB should still limit its search to
> below 128TB, i.e. we should not use slb_addr_limit to compute the slice
> mask in this case. Instead, we should derive the high addr limit from
> the mmap hint addr value.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Applied to powerpc fixes, thanks.
https://git.kernel.org/powerpc/c/7a06c66835f75fe2be4f154a93cc30
cheers