* [PATCH RFC V1] mm: convert rcu_read_lock() to srcu_read_lock(), thus allowing to sleep in callbacks
From: sagig @ 2012-02-05 16:29 UTC
To: aarcange; +Cc: ogerlitz, gleb, oren, linux-mm
Now that the anon_vma lock and i_mmap_mutex are both sleepable mutexes, it is possible to schedule inside invalidation callbacks
(such as invalidate_page, invalidate_range_start/end and change_pte).
This is essential for RDMA drivers that implement on-demand paging and must perform a sleeping HW sync from these callbacks.
Signed-off-by: sagi grimberg <sagig@mellanox.co.il>
---
changes from V0:
1. the srcu_struct should be shared rather than allocated in each callback - removed it from the callbacks
2. added the srcu_struct under mmu_notifier_mm
3. init_srcu_struct() when creating the mmu_notifier_mm
4. cleanup_srcu_struct() when destroying the mmu_notifier_mm
include/linux/mmu_notifier.h | 3 +++
mm/mmu_notifier.c | 23 +++++++++++++++--------
2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 1d1b1e1..f3d6f30 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/srcu.h>
struct mmu_notifier;
struct mmu_notifier_ops;
@@ -21,6 +22,8 @@ struct mmu_notifier_mm {
struct hlist_head list;
/* to serialize the list modifications and hlist_unhashed */
spinlock_t lock;
+ /* to enable sleeping in callbacks */
+ struct srcu_struct srcu;
};
struct mmu_notifier_ops {
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9a611d3..3d4f007 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -123,10 +123,11 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
pte_t pte)
{
+ int idx;
struct mmu_notifier *mn;
struct hlist_node *n;
- rcu_read_lock();
+ idx = srcu_read_lock(&mm->mmu_notifier_mm->srcu);
hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->change_pte)
mn->ops->change_pte(mn, mm, address, pte);
@@ -137,49 +138,52 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
else if (mn->ops->invalidate_page)
mn->ops->invalidate_page(mn, mm, address);
}
- rcu_read_unlock();
+ srcu_read_unlock(&mm->mmu_notifier_mm->srcu, idx);
}
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
+ int idx;
struct mmu_notifier *mn;
struct hlist_node *n;
- rcu_read_lock();
+ idx = srcu_read_lock(&mm->mmu_notifier_mm->srcu);
hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_page)
mn->ops->invalidate_page(mn, mm, address);
}
- rcu_read_unlock();
+ srcu_read_unlock(&mm->mmu_notifier_mm->srcu, idx);
}
void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
+ int idx;
struct mmu_notifier *mn;
struct hlist_node *n;
- rcu_read_lock();
+ idx = srcu_read_lock(&mm->mmu_notifier_mm->srcu);
hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_start)
mn->ops->invalidate_range_start(mn, mm, start, end);
}
- rcu_read_unlock();
+ srcu_read_unlock(&mm->mmu_notifier_mm->srcu, idx);
}
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
+ int idx;
struct mmu_notifier *mn;
struct hlist_node *n;
- rcu_read_lock();
+ idx = srcu_read_lock(&mm->mmu_notifier_mm->srcu);
hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
if (mn->ops->invalidate_range_end)
mn->ops->invalidate_range_end(mn, mm, start, end);
}
- rcu_read_unlock();
+ srcu_read_unlock(&mm->mmu_notifier_mm->srcu, idx);
}
static int do_mmu_notifier_register(struct mmu_notifier *mn,
@@ -204,6 +208,8 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
if (!mm_has_notifiers(mm)) {
INIT_HLIST_HEAD(&mmu_notifier_mm->list);
+ if (init_srcu_struct(&mmu_notifier_mm->srcu))
+ goto out_cleanup;
spin_lock_init(&mmu_notifier_mm->lock);
mm->mmu_notifier_mm = mmu_notifier_mm;
mmu_notifier_mm = NULL;
@@ -266,6 +272,7 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
+ cleanup_srcu_struct(&mm->mmu_notifier_mm->srcu);
kfree(mm->mmu_notifier_mm);
mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}
--
1.7.8.2
* Re: [PATCH RFC V1] mm: convert rcu_read_lock() to srcu_read_lock(), thus allowing to sleep in callbacks
From: Konstantin Khlebnikov @ 2012-02-05 20:27 UTC
To: sagig@mellanox.com
Cc: aarcange@redhat.com, ogerlitz@mellanox.com, gleb@redhat.com,
oren@mellanox.com, linux-mm@kvack.org
sagig@mellanox.com wrote:
> Now that the anon_vma lock and i_mmap_mutex are both sleepable mutexes, it is possible to schedule inside invalidation callbacks
> (such as invalidate_page, invalidate_range_start/end and change_pte).
> This is essential for RDMA drivers that implement on-demand paging and must perform a sleeping HW sync from these callbacks.
>
> Signed-off-by: sagi grimberg <sagig@mellanox.co.il>
Ok, this is better, but it still does not work =)
Nobody synchronizes with this srcu. There are at least two candidates:
mmu_notifier_release() and mmu_notifier_unregister().
They call synchronize_rcu(); you must replace that with synchronize_srcu().
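I.e. roughly this shape in mmu_notifier_unregister() (a sketch of the idea
only, not a tested patch; mmu_notifier_release() needs the same treatment):

void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int idx;

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/* ->release may sleep, so it runs under the srcu too */
		idx = srcu_read_lock(&mm->mmu_notifier_mm->srcu);
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&mm->mmu_notifier_mm->srcu, idx);

		spin_lock(&mm->mmu_notifier_mm->lock);
		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running callback to finish, including ->release
	 * if it was run by mmu_notifier_release() instead of us.  With
	 * sleepable callbacks this has to be synchronize_srcu() on the
	 * shared srcu_struct; synchronize_rcu() only waits for classic,
	 * non-sleeping RCU read-side sections.
	 */
	synchronize_srcu(&mm->mmu_notifier_mm->srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}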
> ---
> changes from V0:
> 1. the srcu_struct should be shared rather than allocated in each callback - removed it from the callbacks
> 2. added the srcu_struct under mmu_notifier_mm
> 3. init_srcu_struct() when creating the mmu_notifier_mm
> 4. cleanup_srcu_struct() when destroying the mmu_notifier_mm
>
> @@ -204,6 +208,8 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
>
> if (!mm_has_notifiers(mm)) {
> INIT_HLIST_HEAD(&mmu_notifier_mm->list);
> + if (init_srcu_struct(&mmu_notifier_mm->srcu))
> + goto out_cleanup;
Move this up, out of the mm->mmap_sem lock, and fix the error path.
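I.e. roughly (a sketch only; the out_free label is made up here and the
rest of the error path still needs wiring up):

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;
	/* may sleep and allocate, so do it before taking mmap_sem */
	ret = init_srcu_struct(&mmu_notifier_mm->srcu);
	if (unlikely(ret))
		goto out_free;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	...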
> spin_lock_init(&mmu_notifier_mm->lock);
> mm->mmu_notifier_mm = mmu_notifier_mm;
> mmu_notifier_mm = NULL;
> @@ -266,6 +272,7 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
> void __mmu_notifier_mm_destroy(struct mm_struct *mm)
> {
> BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
> + cleanup_srcu_struct(&mm->mmu_notifier_mm->srcu);
> kfree(mm->mmu_notifier_mm);
> mm->mmu_notifier_mm = LIST_POISON1; /* debug */
> }
* Re: [PATCH RFC V1] mm: convert rcu_read_lock() to srcu_read_lock(), thus allowing to sleep in callbacks
From: sagig @ 2012-02-06 9:11 UTC
To: Konstantin Khlebnikov
Cc: Andrea Arcangeli, Or Gerlitz, gleb@redhat.com, Oren Duer,
linux-mm@kvack.org
On 2/5/2012 10:27 PM, Konstantin Khlebnikov wrote:
> sagig@mellanox.com wrote:
>> Now that the anon_vma lock and i_mmap_mutex are both sleepable
>> mutexes, it is possible to schedule inside invalidation callbacks
>> (such as invalidate_page, invalidate_range_start/end and change_pte).
>> This is essential for RDMA drivers that implement on-demand paging
>> and must perform a sleeping HW sync from these callbacks.
>>
>> Signed-off-by: sagi grimberg <sagig@mellanox.co.il>
>
> Ok, this is better, but it still does not work =)
> Nobody synchronizes with this srcu. There are at least two candidates:
> mmu_notifier_release() and mmu_notifier_unregister().
> They call synchronize_rcu(); you must replace that with synchronize_srcu().
>
Yes, I understand - will fix.
>> ---
>> changes from V0:
>> 1. the srcu_struct should be shared rather than allocated in each
>> callback - removed it from the callbacks
>> 2. added the srcu_struct under mmu_notifier_mm
>> 3. init_srcu_struct() when creating the mmu_notifier_mm
>> 4. cleanup_srcu_struct() when destroying the mmu_notifier_mm
>>
>
>> @@ -204,6 +208,8 @@ static int do_mmu_notifier_register(struct
>> mmu_notifier *mn,
>>
>> if (!mm_has_notifiers(mm)) {
>> INIT_HLIST_HEAD(&mmu_notifier_mm->list);
>> + if (init_srcu_struct(&mmu_notifier_mm->srcu))
>> + goto out_cleanup;
>
> Move this up, out of the mm->mmap_sem lock, and fix the error path.
>
Yes, I see that init_srcu_struct() uses GFP_KERNEL allocations.
But what if do_mmu_notifier_register() was called from
__mmu_notifier_register() (where mmap_sem is held)? Won't I end up with
the same violation?

Another question, just to make sure I understand: should I move only the
init_srcu_struct() call out of mmap_sem (which will require checking
!mm_has_notifiers(mm) twice), or the entire mmu_notifier_mm
initialization?
>
>> spin_lock_init(&mmu_notifier_mm->lock);
>> mm->mmu_notifier_mm = mmu_notifier_mm;
>> mmu_notifier_mm = NULL;
>> @@ -266,6 +272,7 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
>> void __mmu_notifier_mm_destroy(struct mm_struct *mm)
>> {
>> BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
>> + cleanup_srcu_struct(&mm->mmu_notifier_mm->srcu);
>> kfree(mm->mmu_notifier_mm);
>> mm->mmu_notifier_mm = LIST_POISON1; /* debug */
>> }
>
* Re: [PATCH RFC V1] mm: convert rcu_read_lock() to srcu_read_lock(), thus allowing to sleep in callbacks
From: Konstantin Khlebnikov @ 2012-02-06 11:29 UTC
To: sagig
Cc: Andrea Arcangeli, Or Gerlitz, gleb@redhat.com, Oren Duer,
linux-mm@kvack.org
sagig wrote:
> On 2/5/2012 10:27 PM, Konstantin Khlebnikov wrote:
>> sagig@mellanox.com wrote:
>>> Now that the anon_vma lock and i_mmap_mutex are both sleepable
>>> mutexes, it is possible to schedule inside invalidation callbacks
>>> (such as invalidate_page, invalidate_range_start/end and change_pte).
>>> This is essential for RDMA drivers that implement on-demand paging
>>> and must perform a sleeping HW sync from these callbacks.
>>>
>>> Signed-off-by: sagi grimberg <sagig@mellanox.co.il>
>>
>> Ok, this is better, but it still does not work =)
>> Nobody synchronizes with this srcu. There are at least two candidates:
>> mmu_notifier_release() and mmu_notifier_unregister().
>> They call synchronize_rcu(); you must replace that with synchronize_srcu().
>>
>
> Yes, I understand - will fix.
>
>>> ---
>>> changes from V0:
>>> 1. the srcu_struct should be shared rather than allocated in each
>>> callback - removed it from the callbacks
>>> 2. added the srcu_struct under mmu_notifier_mm
>>> 3. init_srcu_struct() when creating the mmu_notifier_mm
>>> 4. cleanup_srcu_struct() when destroying the mmu_notifier_mm
>>>
>>
>>> @@ -204,6 +208,8 @@ static int do_mmu_notifier_register(struct
>>> mmu_notifier *mn,
>>>
>>> if (!mm_has_notifiers(mm)) {
>>> INIT_HLIST_HEAD(&mmu_notifier_mm->list);
>>> + if (init_srcu_struct(&mmu_notifier_mm->srcu))
>>> + goto out_cleanup;
>>
>> Move this up, out of the mm->mmap_sem lock, and fix the error path.
>>
>
> Yes, I see that init_srcu_struct() uses GFP_KERNEL allocations.
> But what if do_mmu_notifier_register() was called from
> __mmu_notifier_register() (where mmap_sem is held)? Won't I end up with
> the same violation?
In this case it is not strictly necessary, but allocating outside of locks
is usually better than allocating under a lock.
>
> Another question, just to make sure I understand: should I move only the
> init_srcu_struct() call out of mmap_sem (which will require checking
> !mm_has_notifiers(mm) twice), or the entire mmu_notifier_mm
> initialization?
this code should do these steps:
* allocate the new struct mmu_notifier_mm with all of its sub-structures, like the srcu, fully initialized
* take the locks
* install the new mmu_notifier_mm if the mm does not already have one
* add our notifier to the mmu_notifier_mm list
* release the locks
* free the new mmu_notifier_mm and all of its sub-structures if it went unused
This is a very commonly used pattern; sometimes it has fast paths, sometimes not.
It looks like in this case there is usually only one notifier per mm,
so the newly allocated mmu_notifier_mm is unlikely to be released.
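Applied to do_mmu_notifier_register() the pattern would look roughly like
this (a sketch of the shape only, not a tested patch; label names are
illustrative):

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/* allocate and fully initialize before taking any locks */
	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;
	ret = init_srcu_struct(&mmu_notifier_mm->srcu);
	if (unlikely(ret))
		goto out_free;
	INIT_HLIST_HEAD(&mmu_notifier_mm->list);
	spin_lock_init(&mmu_notifier_mm->lock);

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_unlock;

	if (!mm_has_notifiers(mm)) {
		/* install the pre-initialized structure */
		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_users);

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_unlock:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* if we raced with another registration or failed, free our copy */
	if (mmu_notifier_mm)
		cleanup_srcu_struct(&mmu_notifier_mm->srcu);
out_free:
	kfree(mmu_notifier_mm);		/* kfree(NULL) is a no-op */
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

The unused copy, including its srcu, is torn down outside all the locks.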
>
>>
>>> spin_lock_init(&mmu_notifier_mm->lock);
>>> mm->mmu_notifier_mm = mmu_notifier_mm;
>>> mmu_notifier_mm = NULL;
>>> @@ -266,6 +272,7 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
>>> void __mmu_notifier_mm_destroy(struct mm_struct *mm)
>>> {
>>> BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
>>> + cleanup_srcu_struct(&mm->mmu_notifier_mm->srcu);
>>> kfree(mm->mmu_notifier_mm);
>>> mm->mmu_notifier_mm = LIST_POISON1; /* debug */
>>> }
>>
>