* [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
@ 2015-08-31 20:54 Chen Gang
[not found] ` <55E5AD17.6060901@hotmail.com>
0 siblings, 1 reply; 7+ messages in thread
From: Chen Gang @ 2015-08-31 20:54 UTC (permalink / raw)
To: Andrew Morton, mhocko@suse.cz; +Cc: Linux Memory, kernel mailing list
When a failure occurs, we need not call khugepaged_enter_vma_merge() or
validate_mm().
Also simplify do_munmap(): declare 'error' once at function scope instead of
twice in sub-blocks.
Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
---
mm/mmap.c | 116 +++++++++++++++++++++++++++++++-------------------------------
1 file changed, 58 insertions(+), 58 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index df6d5f0..d32199a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2182,10 +2182,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (address < PAGE_ALIGN(address+4))
address = PAGE_ALIGN(address+4);
else {
- vma_unlock_anon_vma(vma);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err;
}
- error = 0;
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
@@ -2194,38 +2193,39 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
size = address - vma->vm_start;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
- error = -ENOMEM;
- if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow);
- if (!error) {
- /*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_sem
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * vma_lock_anon_vma() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
- */
- spin_lock(&vma->vm_mm->page_table_lock);
- anon_vma_interval_tree_pre_update_vma(vma);
- vma->vm_end = address;
- anon_vma_interval_tree_post_update_vma(vma);
- if (vma->vm_next)
- vma_gap_update(vma->vm_next);
- else
- vma->vm_mm->highest_vm_end = address;
- spin_unlock(&vma->vm_mm->page_table_lock);
-
- perf_event_mmap(vma);
- }
+ if (vma->vm_pgoff + (size >> PAGE_SHIFT) < vma->vm_pgoff) {
+ error = -ENOMEM;
+ goto err;
}
+ error = acct_stack_growth(vma, size, grow);
+ if (error)
+ goto err;
+ /*
+ * vma_gap_update() doesn't support concurrent updates, but we
+ * only hold a shared mmap_sem lock here, so we need to protect
+ * against concurrent vma expansions. vma_lock_anon_vma()
+ * doesn't help here, as we don't guarantee that all growable
+ * vmas in a mm share the same root anon vma. So, we reuse mm->
+ * page_table_lock to guard against concurrent vma expansions.
+ */
+ spin_lock(&vma->vm_mm->page_table_lock);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ vma->vm_end = address;
+ anon_vma_interval_tree_post_update_vma(vma);
+ if (vma->vm_next)
+ vma_gap_update(vma->vm_next);
+ else
+ vma->vm_mm->highest_vm_end = address;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+
+ perf_event_mmap(vma);
}
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(vma->vm_mm);
+ return 0;
+err:
+ vma_unlock_anon_vma(vma);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2265,36 +2265,37 @@ int expand_downwards(struct vm_area_struct *vma,
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
- error = -ENOMEM;
- if (grow <= vma->vm_pgoff) {
- error = acct_stack_growth(vma, size, grow);
- if (!error) {
- /*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_sem
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * vma_lock_anon_vma() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
- */
- spin_lock(&vma->vm_mm->page_table_lock);
- anon_vma_interval_tree_pre_update_vma(vma);
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
- anon_vma_interval_tree_post_update_vma(vma);
- vma_gap_update(vma);
- spin_unlock(&vma->vm_mm->page_table_lock);
-
- perf_event_mmap(vma);
- }
+ if (grow > vma->vm_pgoff) {
+ error = -ENOMEM;
+ goto err;
}
+ error = acct_stack_growth(vma, size, grow);
+ if (error)
+ goto err;
+ /*
+ * vma_gap_update() doesn't support concurrent updates, but we
+ * only hold a shared mmap_sem lock here, so we need to protect
+ * against concurrent vma expansions. vma_lock_anon_vma()
+ * doesn't help here, as we don't guarantee that all growable
+ * vmas in a mm share the same root anon vma. So, we reuse mm->
+ * page_table_lock to guard against concurrent vma expansions.
+ */
+ spin_lock(&vma->vm_mm->page_table_lock);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ anon_vma_interval_tree_post_update_vma(vma);
+ vma_gap_update(vma);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+
+ perf_event_mmap(vma);
}
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(vma->vm_mm);
+ return 0;
+err:
+ vma_unlock_anon_vma(vma);
return error;
}
@@ -2542,6 +2543,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
unsigned long end;
struct vm_area_struct *vma, *prev, *last;
+ int error;
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
@@ -2570,8 +2572,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
* places tmp vma above, and higher split_vma places tmp vma below.
*/
if (start > vma->vm_start) {
- int error;
-
/*
* Make sure that map_count on return from munmap() will
* not exceed its limit; but let map_count go just above
@@ -2589,7 +2589,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
- int error = __split_vma(mm, last, end, 1);
+ error = __split_vma(mm, last, end, 1);
if (error)
return error;
}
--
1.9.3
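For readers who want the shape of the change without re-reading the whole diff,
here is a minimal, self-contained sketch of the error-path pattern the patch
introduces. This is plain userspace C with hypothetical names (expand_sketch(),
lock(), acct_growth_ok), not the kernel code: every failure takes the same
"goto err" exit, which only drops the lock, so the success-only hooks
(khugepaged_enter_vma_merge() and validate_mm() in the real code) never run
after an error.

```c
#include <stdio.h>

static int acct_growth_ok;	/* stands in for acct_stack_growth() succeeding */
static int lock_count;		/* stands in for the anon_vma lock */

static void lock(void)   { lock_count++; }
static void unlock(void) { lock_count--; }

/* Mirrors the restructured expand_upwards()/expand_downwards(). */
static int expand_sketch(unsigned long pgoff, unsigned long pages)
{
	int error;

	lock();

	/* For unsigned types, a + b < a exactly when the sum wrapped. */
	if (pgoff + pages < pgoff) {
		error = -1;		/* -ENOMEM in the kernel */
		goto err;
	}
	if (!acct_growth_ok) {
		error = -1;
		goto err;
	}

	/* ... grow the vma under page_table_lock ... */

	unlock();
	/* Success-only bookkeeping would happen here, after the unlock. */
	return 0;
err:
	unlock();
	return error;
}

int main(void)
{
	printf("failure: %d, lock balanced: %d\n",
	       expand_sketch(100, 5), lock_count == 0);
	acct_growth_ok = 1;
	printf("success: %d, lock balanced: %d\n",
	       expand_sketch(100, 5), lock_count == 0);
	return 0;
}
```

Centralizing the failure exit keeps the lock/unlock pairing obvious on every
path, which is what lets the patch delete the early "vma_unlock_anon_vma();
return -ENOMEM;" branch.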
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
[not found] ` <55E5AD17.6060901@hotmail.com>
@ 2015-09-01 13:49 ` Chen Gang
[not found] ` <55E96E01.5010605@hotmail.com>
0 siblings, 1 reply; 7+ messages in thread
From: Chen Gang @ 2015-09-01 13:49 UTC (permalink / raw)
To: Andrew Morton, mhocko@suse.cz; +Cc: Linux Memory, kernel mailing list
[-- Attachment #1: Type: text/plain, Size: 6531 bytes --]
Sorry for the incorrect format of the patch. I have put the patch in the
attachment, generated by "git format-patch -M HEAD^". Please help check,
thanks.
Next, I shall try to find another mail address that works both from China
and with our mailing list.
Thanks.
On 9/1/15 04:54, Chen Gang wrote:
> [patch quoted in full; snipped. See the original posting above.]
--
Chen Gang
Open, share, and attitude like air, water, and life which God blessed
[-- Attachment #2: 0001-mm-mmap.c-Only-call-vma_unlock_anon_vm-for-failure-i.patch --]
[-- Type: text/plain, Size: 6565 bytes --]
From 87b8632f50e6ff3d09289ae6a76bd71fc2ecb074 Mon Sep 17 00:00:00 2001
From: Chen Gang <gang.chen.5i5j@gmail.com>
Date: Tue, 1 Sep 2015 04:40:56 +0800
Subject: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() for failure in
expand_upwards() and expand_downwards()
When a failure occurs, we need not call khugepaged_enter_vma_merge() or
validate_mm().
Also simplify do_munmap(): declare 'error' once at function scope instead of
twice in sub-blocks.
Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
---
mm/mmap.c | 116 +++++++++++++++++++++++++++++++-------------------------------
1 file changed, 58 insertions(+), 58 deletions(-)
[diff identical to the one in the original posting above; snipped]
--
1.9.3
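A side note on the guard that the patch inverts in expand_upwards(): the old
accept-condition vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff and the
new reject-condition vma->vm_pgoff + (size >> PAGE_SHIFT) < vma->vm_pgoff are
exact complements, because an unsigned sum is smaller than an operand precisely
when it wraps around. A minimal demonstration (plain userspace C, not kernel
code):

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Probe offsets straddling the unsigned wrap-around boundary. */
	unsigned long pgoff = ULONG_MAX - 2;

	for (unsigned long pages = 0; pages <= 5; pages++) {
		int old_accept = (pgoff + pages >= pgoff);	/* pre-patch  */
		int new_reject = (pgoff + pages <  pgoff);	/* post-patch */

		printf("pages=%lu old_accept=%d new_reject=%d complementary=%s\n",
		       pages, old_accept, new_reject,
		       (old_accept != new_reject) ? "yes" : "NO");
	}
	return 0;
}
```

Wrap-around begins at pages == 3 here, and the two forms disagree on no input,
so the restructuring changes only control flow, not which growth requests are
refused.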
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
@ 2015-09-02 21:36 gang.chen.5i5j
0 siblings, 0 replies; 7+ messages in thread
From: gang.chen.5i5j @ 2015-09-02 21:36 UTC (permalink / raw)
To: Linux Memory, kernel mailing list; +Cc: gchen_5i5j
At present, I can use a git client with my 21cn mail address. I hope it can
be accepted by our mailing list.
Thanks.
On 9/1/15 21:49, Chen Gang wrote:
>
> Sorry for the incorrect format of the patch. I have put the patch in the
> attachment, generated by "git format-patch -M HEAD^". Please help check,
> thanks.
>
> Next, I shall try to find another mail address that works both from China
> and with our mailing list.
>
> Thanks.
>
> On 9/1/15 04:54, Chen Gang wrote:
>> [patch quoted in full; snipped. See the original posting above.]
>
--
Chen Gang
Open, share, and attitude like air, water, and life which God blessed
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
[not found] ` <55E96E01.5010605@hotmail.com>
@ 2015-09-04 10:09 ` Chen Gang
[not found] ` <55EAC021.3080205@hotmail.com>
0 siblings, 1 reply; 7+ messages in thread
From: Chen Gang @ 2015-09-04 10:09 UTC (permalink / raw)
To: Andrew Morton, Michal Hocko; +Cc: Linux Memory, kernel mailing list, Chen Gang
Hello all:
It seems 21cn mail can be accepted by our mailing list (I didn't receive
any rejection notification from the mailing list).
If it is necessary to send the patch again via a git client, please let me
know; I shall resend it from my 21cn mail address.
Any ideas, suggestions, or improvements are welcome.
Thanks.
On 9/1/15 21:49, Chen Gang wrote:
>
> Sorry for the incorrect format of the patch. I have put the patch in the
> attachment, generated by "git format-patch -M HEAD^". Please help check,
> thanks.
>
> Next, I shall try to find another mail address that works both from China
> and with our mailing list.
>
> Thanks.
>
Thanks.
--
Chen Gang (陈刚)
Open, share, and attitude like air, water, and life which God blessed
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
[not found] ` <55EAC021.3080205@hotmail.com>
@ 2015-09-05 10:11 ` Chen Gang
2015-09-07 7:24 ` Michal Hocko
0 siblings, 1 reply; 7+ messages in thread
From: Chen Gang @ 2015-09-05 10:11 UTC (permalink / raw)
To: Andrew Morton, Michal Hocko; +Cc: Linux Memory, kernel mailing list, Chen Gang
Hello All:
I have sent 2 new patches for mm, and 1 patch for arch metag, via my
21cn mail. Could anyone tell me whether they have received the patches?
At present:
- For the Chinese sites (qq, sohu, sina, 163, 21cn ...), it seems only 21cn
is OK (qq is not accepted; sohu, sina, and 163 support plain text badly).
- gmail can't send patches (but can receive mail via my qq mail address);
hotmail can only send patches from the website.
- If 21cn mail does not work well, I guess the only way left for me is to
send the patch as an attachment from the hotmail website.
Any ideas, suggestions, or improvements are welcome.
Thanks.
On 9/4/15 18:09, Chen Gang wrote:
> Hello all:
>
> It seems 21cn mail can be accepted by our mailing list (I didn't receive
> any rejection notification from the mailing list).
>
> If it is necessary to send the patch again via a git client, please let me
> know; I shall resend it from my 21cn mail address.
>
> Any ideas, suggestions, or improvements are welcome.
>
> Thanks.
>
> On 9/1/15 21:49, Chen Gang wrote:
>>
>> Sorry for the incorrect format of the patch. I have put the patch in the
>> attachment, generated by "git format-patch -M HEAD^". Please help check,
>> thanks.
>>
>> Next, I shall try to find another mail address that works both from China
>> and with our mailing list.
>>
>> Thanks.
>
--
Chen Gang (陈刚)
Open, share, and attitude like air, water, and life which God blessed
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
2015-09-05 10:11 ` Chen Gang
@ 2015-09-07 7:24 ` Michal Hocko
[not found] ` <55EEF4B4.5010205@hotmail.com>
0 siblings, 1 reply; 7+ messages in thread
From: Michal Hocko @ 2015-09-07 7:24 UTC (permalink / raw)
To: Chen Gang; +Cc: Andrew Morton, Linux Memory, kernel mailing list, Chen Gang
On Sat 05-09-15 18:11:40, Chen Gang wrote:
> Hello All:
>
> I have sent 2 new patches for mm, and 1 patch for arch metag, via my
> 21cn mail. Could anyone tell me whether they have received the patches?
Yes, they seem to be in the archive.
http://lkml.kernel.org/r/COL130-W64A6555222F8CEDA513171B9560%40phx.gbl
http://lkml.kernel.org/r/COL130-W16C972B0457D5C7C9CB06B9560%40phx.gbl
You can check that easily by http://lkml.kernel.org/r/$MESSAGE_ID
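For what it's worth, the only transformation needed to build such a link is
percent-encoding the raw Message-ID (which is why the '@' appears as %40 in
the two URLs above). A rough sketch in C, assuming only '@' and '%' need
escaping (which holds for typical Message-IDs); lkml_url() is a hypothetical
helper, not part of any real tool:

```c
#include <stdio.h>
#include <string.h>

/* Build an lkml.kernel.org redirector URL from a raw Message-ID. */
static void lkml_url(const char *msgid, char *out, size_t outlen)
{
	size_t n = snprintf(out, outlen, "http://lkml.kernel.org/r/");

	for (const char *p = msgid; *p && n + 4 < outlen; p++) {
		if (*p == '@' || *p == '%')
			n += snprintf(out + n, outlen - n, "%%%02X",
				      (unsigned char)*p);	/* '@' -> %40 */
		else
			out[n++] = *p;
	}
	out[n] = '\0';
}

int main(void)
{
	char url[256];

	lkml_url("COL130-W64A6555222F8CEDA513171B9560@phx.gbl", url, sizeof(url));
	puts(url);	/* matches the first link quoted above */
	return 0;
}
```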
--
Michal Hocko
SUSE Labs
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()
[not found] ` <55EEF4B4.5010205@hotmail.com>
@ 2015-09-08 14:44 ` Chen Gang
0 siblings, 0 replies; 7+ messages in thread
From: Chen Gang @ 2015-09-08 14:44 UTC (permalink / raw)
To: Michal Hocko
Cc: Andrew Morton, Linux Memory, kernel mailing list, Max Filippov
[-- Attachment #1: Type: text/plain; charset="gb2312", Size: 1410 bytes --]
On 9/7/15 15:24, Michal Hocko wrote:
> On Sat 05-09-15 18:11:40, Chen Gang wrote:
>> Hello All:
>>
>> I have sent 2 new patches for mm, and 1 patch for arch metag, via my
>> 21cn mail. Could anyone tell me whether they have received the patches?
>
> Yes, they seem to be in the archive.
> http://lkml.kernel.org/r/COL130-W64A6555222F8CEDA513171B9560%40phx.gbl
> http://lkml.kernel.org/r/COL130-W16C972B0457D5C7C9CB06B9560%40phx.gbl
>
> You can check that easily by http://lkml.kernel.org/r/$MESSAGE_ID
>
Thank you very much for your reply. :-)
Excuse me, I cannot open http://lkml.kernel.org/r/..., but I can open
https://lkml.org/lkml/2015/9/3 (or another date). On that site, I cannot
find any of the patches I sent (I sent them on 2015-09-03/04).
On 2015-09-05, a qemu member told me to check the patches on the website,
so I could see for myself whether the patches were actually sent (I met
almost the same issue with qemu, so I consulted them, too). That is how I
found https://lkml.org.
So I sent the kernel patches again, with the attachments, via my hotmail on
2015-09-05/06. I guess the patches you saw are the ones sent via my hotmail
on 2015-09-05/06. Please help check again, thanks.
Thanks.
--
Chen Gang (陈刚)
Open, share, and attitude like air, water, and life which God blessed
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2015-09-08 14:44 UTC | newest]
Thread overview: 7+ messages
-- links below jump to the message on this page --
2015-09-02 21:36 [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards() gang.chen.5i5j
-- strict thread matches above, loose matches on Subject: below --
2015-08-31 20:54 Chen Gang
[not found] ` <55E5AD17.6060901@hotmail.com>
2015-09-01 13:49 ` Chen Gang
[not found] ` <55E96E01.5010605@hotmail.com>
2015-09-04 10:09 ` Chen Gang
[not found] ` <55EAC021.3080205@hotmail.com>
2015-09-05 10:11 ` Chen Gang
2015-09-07 7:24 ` Michal Hocko
[not found] ` <55EEF4B4.5010205@hotmail.com>
2015-09-08 14:44 ` Chen Gang