From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Andrea Arcangeli <aarcange@redhat.com>, Avi Kivity <avi@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>, Rik van Riel <riel@redhat.com>,
	Ingo Molnar <mingo@elte.hu>, akpm@linux-foundation.org,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	David Miller <davem@davemloft.net>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>, Mel Gorman <mel@csn.ul.ie>,
	Nick Piggin <npiggin@kernel.dk>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Paul McKenney <paulmck@linux.vnet.ibm.com>,
	Yanmin Zhang <yanmin_zhang@linux.intel.com>,
	Stephen Rothwell <sfr@canb.auug.org.au>
Subject: [PATCH 18/20] mm: Convert i_mmap_lock and anon_vma->lock to mutexes
Date: Mon, 18 Oct 2010 13:24:51 +0200
Message-ID: <20101018112951.234380099@chello.nl>
In-Reply-To: <20101018112433.556591568@chello.nl>

[-- Attachment #1: mm-mutex.patch --]
[-- Type: text/plain, Size: 19732 bytes --]

Straightforward conversion of i_mmap_lock and anon_vma->lock to mutexes.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 arch/x86/mm/hugetlbpage.c |    4 ++--
 fs/gfs2/main.c            |    2 +-
 fs/hugetlbfs/inode.c      |    4 ++--
 fs/inode.c                |    2 +-
 fs/nilfs2/btnode.c        |    2 +-
 include/linux/fs.h        |    2 +-
 include/linux/mm.h        |    2 +-
 include/linux/rmap.h      |   12 ++++++------
 kernel/fork.c             |    4 ++--
 mm/filemap_xip.c          |    4 ++--
 mm/fremap.c               |    4 ++--
 mm/hugetlb.c              |   12 ++++++------
 mm/memory-failure.c       |    4 ++--
 mm/memory.c               |   14 +++++++-------
 mm/mmap.c                 |   18 +++++++++---------
 mm/mremap.c               |    4 ++--
 mm/rmap.c                 |   22 +++++++++++-----------
 17 files changed, 58 insertions(+), 58 deletions(-)

Index: linux-2.6/arch/x86/mm/hugetlbpage.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/hugetlbpage.c
+++ linux-2.6/arch/x86/mm/hugetlbpage.c
@@ -72,7 +72,7 @@ static void huge_pmd_share(struct mm_str
 	if (!vma_shareable(vma, addr))
 		return;
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -97,7 +97,7 @@ static void huge_pmd_share(struct mm_str
 	put_page(virt_to_page(spte));
 	spin_unlock(&mm->page_table_lock);
 out:
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 }
 
 /*
Index: linux-2.6/fs/hugetlbfs/inode.c
===================================================================
--- linux-2.6.orig/fs/hugetlbfs/inode.c
+++ linux-2.6/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct ino
 	pgoff = offset >> PAGE_SHIFT;
 
 	i_size_write(inode, offset);
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	if (!prio_tree_empty(&mapping->i_mmap))
 		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 	truncate_hugepages(inode, offset);
 	return 0;
 }
Index: linux-2.6/fs/inode.c
===================================================================
--- linux-2.6.orig/fs/inode.c
+++ linux-2.6/fs/inode.c
@@ -257,7 +257,7 @@ void inode_init_once(struct inode *inode
 	INIT_LIST_HEAD(&inode->i_devices);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	spin_lock_init(&inode->i_data.tree_lock);
-	spin_lock_init(&inode->i_data.i_mmap_lock);
+	mutex_init(&inode->i_data.i_mmap_lock);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
Index: linux-2.6/include/linux/fs.h
===================================================================
--- linux-2.6.orig/include/linux/fs.h
+++ linux-2.6/include/linux/fs.h
@@ -626,7 +626,7 @@ struct address_space {
 	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
-	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
+	struct mutex		i_mmap_lock;	/* protect tree, count, list */
 	unsigned int		truncate_count;	/* Cover race condition with truncate */
 	unsigned long		nrpages;	/* number of total pages */
 	pgoff_t			writeback_index;/* writeback starts here */
Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -758,7 +758,7 @@ struct zap_details {
 	struct address_space *check_mapping;	/* Check page->mapping if set */
 	pgoff_t	first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
-	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
+	struct mutex *i_mmap_lock;		/* For unmap_mapping_range: */
 	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
Index: linux-2.6/include/linux/rmap.h
===================================================================
--- linux-2.6.orig/include/linux/rmap.h
+++ linux-2.6/include/linux/rmap.h
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -25,7 +25,7 @@
 * pointing to this anon_vma once its vma list is empty.
 */
 struct anon_vma {
-	spinlock_t lock;	/* Serialize access to vma list */
+	struct mutex lock;	/* Serialize access to vma list */
 	struct anon_vma *root;	/* Root of this anon_vma tree */
 	/*
 	 * The refcount is taken on an anon_vma when there is no
@@ -93,24 +93,24 @@ static inline void vma_lock_anon_vma(str
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_lock(&anon_vma->root->lock);
+		mutex_lock(&anon_vma->root->lock);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_unlock(&anon_vma->root->lock);
+		mutex_unlock(&anon_vma->root->lock);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	spin_lock(&anon_vma->root->lock);
+	mutex_lock(&anon_vma->root->lock);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->root->lock);
+	mutex_unlock(&anon_vma->root->lock);
 }
 
 /*
Index: linux-2.6/kernel/fork.c
===================================================================
--- linux-2.6.orig/kernel/fork.c
+++ linux-2.6/kernel/fork.c
@@ -369,7 +369,7 @@ static int dup_mmap(struct mm_struct *mm
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-			spin_lock(&mapping->i_mmap_lock);
+			mutex_lock(&mapping->i_mmap_lock);
 			if (tmp->vm_flags & VM_SHARED)
 				mapping->i_mmap_writable++;
 			tmp->vm_truncate_count = mpnt->vm_truncate_count;
@@ -377,7 +377,7 @@ static int dup_mmap(struct mm_struct *mm
 			/* insert tmp into the share list, just after mpnt */
 			vma_prio_tree_add(tmp, mpnt);
 			flush_dcache_mmap_unlock(mapping);
-			spin_unlock(&mapping->i_mmap_lock);
+			mutex_unlock(&mapping->i_mmap_lock);
 		}
 
 		/*
Index: linux-2.6/mm/filemap_xip.c
===================================================================
--- linux-2.6.orig/mm/filemap_xip.c
+++ linux-2.6/mm/filemap_xip.c
@@ -183,7 +183,7 @@ __xip_unmap (struct address_space * mapp
 		return;
 
 retry:
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		mm = vma->vm_mm;
 		address = vma->vm_start +
@@ -201,7 +201,7 @@ retry:
 			page_cache_release(page);
 		}
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 
 	if (locked) {
 		mutex_unlock(&xip_sparse_mutex);
Index: linux-2.6/mm/fremap.c
===================================================================
--- linux-2.6.orig/mm/fremap.c
+++ linux-2.6/mm/fremap.c
@@ -208,13 +208,13 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
 			}
 			goto out;
 		}
-		spin_lock(&mapping->i_mmap_lock);
+		mutex_lock(&mapping->i_mmap_lock);
 		flush_dcache_mmap_lock(mapping);
 		vma->vm_flags |= VM_NONLINEAR;
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
 		flush_dcache_mmap_unlock(mapping);
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_lock);
 	}
 
 	if (vma->vm_flags & VM_LOCKED) {
Index: linux-2.6/mm/hugetlb.c
===================================================================
--- linux-2.6.orig/mm/hugetlb.c
+++ linux-2.6/mm/hugetlb.c
@@ -2248,9 +2248,9 @@ void __unmap_hugepage_range(struct vm_ar
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_lock(&vma->vm_file->f_mapping->i_mmap_lock);
 	__unmap_hugepage_range(vma, start, end, ref_page);
-	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 }
 
 /*
@@ -2282,7 +2282,7 @@ static int unmap_ref_private(struct mm_s
 	 * this mapping should be shared between all the VMAs,
 	 * __unmap_hugepage_range() is called as the lock is already held
 	 */
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2300,7 +2300,7 @@ static int unmap_ref_private(struct mm_s
 				address, address + huge_page_size(h),
 				page);
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 
 	return 1;
 }
@@ -2775,7 +2775,7 @@ void hugetlb_change_protection(struct vm
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
-	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_lock(&vma->vm_file->f_mapping->i_mmap_lock);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += huge_page_size(h)) {
 		ptep = huge_pte_offset(mm, address);
@@ -2790,7 +2790,7 @@ void hugetlb_change_protection(struct vm
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
 	flush_tlb_range(vma, start, end);
 }
Index: linux-2.6/mm/memory-failure.c
===================================================================
--- linux-2.6.orig/mm/memory-failure.c
+++ linux-2.6/mm/memory-failure.c
@@ -424,7 +424,7 @@ static void collect_procs_file(struct pa
 	 */
 
 	read_lock(&tasklist_lock);
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -444,7 +444,7 @@ static void collect_procs_file(struct pa
 				add_to_kill(tsk, page, vma, to_kill, tkc);
 		}
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 	read_unlock(&tasklist_lock);
 }
 
Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1177,7 +1177,7 @@ unsigned long unmap_vmas(struct mmu_gath
 {
 	long zap_work = ZAP_BLOCK_SIZE;
 	unsigned long start = start_addr;
-	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
+	struct mutex *i_mmap_lock = details ? details->i_mmap_lock : NULL;
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -1227,7 +1227,7 @@ unsigned long unmap_vmas(struct mmu_gath
 			}
 
 			if (need_resched() ||
-				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
+				(i_mmap_lock && mutex_is_contended(i_mmap_lock))) {
 				if (i_mmap_lock)
 					goto out;
 				cond_resched();
@@ -2520,7 +2520,7 @@ again:
 		restart_addr = zap_page_range(vma, start_addr,
 					end_addr - start_addr, details);
-		need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
+		need_break = need_resched() || mutex_is_contended(details->i_mmap_lock);
 
 		if (restart_addr >= end_addr) {
 			/* We have now completed this vma: mark it so */
@@ -2534,9 +2534,9 @@ again:
 			goto again;
 		}
 
-		spin_unlock(details->i_mmap_lock);
+		mutex_unlock(details->i_mmap_lock);
 		cond_resched();
-		spin_lock(details->i_mmap_lock);
+		mutex_lock(details->i_mmap_lock);
 		return -EINTR;
 	}
@@ -2632,7 +2632,7 @@ void unmap_mapping_range(struct address_
 	details.last_index = ULONG_MAX;
 	details.i_mmap_lock = &mapping->i_mmap_lock;
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 
 	/* Protect against endless unmapping loops */
 	mapping->truncate_count++;
@@ -2647,7 +2647,7 @@ void unmap_mapping_range(struct address_
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
Index: linux-2.6/mm/mmap.c
===================================================================
--- linux-2.6.orig/mm/mmap.c
+++ linux-2.6/mm/mmap.c
@@ -216,9 +216,9 @@ void unlink_file_vma(struct vm_area_stru
 
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
-		spin_lock(&mapping->i_mmap_lock);
+		mutex_lock(&mapping->i_mmap_lock);
 		__remove_shared_vm_struct(vma, file, mapping);
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_lock);
 	}
 }
@@ -455,7 +455,7 @@ static void vma_link(struct mm_struct *m
 		mapping = vma->vm_file->f_mapping;
 
 	if (mapping) {
-		spin_lock(&mapping->i_mmap_lock);
+		mutex_lock(&mapping->i_mmap_lock);
 		vma->vm_truncate_count = mapping->truncate_count;
 	}
@@ -463,7 +463,7 @@ static void vma_link(struct mm_struct *m
 	__vma_link_file(vma);
 
 	if (mapping)
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_lock);
 
 	mm->map_count++;
 	validate_mm(mm);
@@ -566,7 +566,7 @@ again:			remove_next = 1 + (end > next->
 		mapping = file->f_mapping;
 		if (!(vma->vm_flags & VM_NONLINEAR))
 			root = &mapping->i_mmap;
-		spin_lock(&mapping->i_mmap_lock);
+		mutex_lock(&mapping->i_mmap_lock);
 		if (importer &&
 		    vma->vm_truncate_count != next->vm_truncate_count) {
 			/*
@@ -640,7 +640,7 @@ again:			remove_next = 1 + (end > next->
 	if (anon_vma)
 		anon_vma_unlock(anon_vma);
 	if (mapping)
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_lock);
 
 	if (remove_next) {
 		if (file) {
@@ -2497,7 +2497,7 @@ static void vm_lock_anon_vma(struct mm_s
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
+		mutex_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->lock. If some other vma in this mm shares
@@ -2527,7 +2527,7 @@ static void vm_lock_mapping(struct mm_st
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+		mutex_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
 	}
 }
@@ -2626,7 +2626,7 @@ static void vm_unlock_mapping(struct add
 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_lock);
 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
 					&mapping->flags))
 			BUG();
Index: linux-2.6/mm/mremap.c
===================================================================
--- linux-2.6.orig/mm/mremap.c
+++ linux-2.6/mm/mremap.c
@@ -90,7 +90,7 @@ static void move_ptes(struct vm_area_str
 		 * and we propagate stale pages into the dst afterward.
 		 */
 		mapping = vma->vm_file->f_mapping;
-		spin_lock(&mapping->i_mmap_lock);
+		mutex_lock(&mapping->i_mmap_lock);
 		if (new_vma->vm_truncate_count &&
 		    new_vma->vm_truncate_count != vma->vm_truncate_count)
 			new_vma->vm_truncate_count = 0;
@@ -122,7 +122,7 @@ static void move_ptes(struct vm_area_str
 	pte_unmap_nested(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
-		spin_unlock(&mapping->i_mmap_lock);
+		mutex_unlock(&mapping->i_mmap_lock);
 	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
Index: linux-2.6/mm/rmap.c
===================================================================
--- linux-2.6.orig/mm/rmap.c
+++ linux-2.6/mm/rmap.c
@@ -302,7 +302,7 @@ static void anon_vma_ctor(void *data)
 {
 	struct anon_vma *anon_vma = data;
 
-	spin_lock_init(&anon_vma->lock);
+	mutex_init(&anon_vma->lock);
 	atomic_set(&anon_vma->refcount, 0);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
@@ -635,7 +635,7 @@ static int page_referenced_file(struct p
 	 */
 	BUG_ON(!PageLocked(page));
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 
 	/*
 	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
@@ -660,7 +660,7 @@ static int page_referenced_file(struct p
 			break;
 	}
 
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 	return referenced;
 }
@@ -747,7 +747,7 @@ static int page_mkclean_file(struct addr
 
 	BUG_ON(PageAnon(page));
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
@@ -756,7 +756,7 @@ static int page_mkclean_file(struct addr
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 	return ret;
 }
@@ -1330,7 +1330,7 @@ static int try_to_unmap_file(struct page
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 		if (address == -EFAULT)
@@ -1376,7 +1376,7 @@ static int try_to_unmap_file(struct page
 	mapcount = page_mapcount(page);
 	if (!mapcount)
 		goto out;
-	cond_resched_lock(&mapping->i_mmap_lock);
+	cond_resched();
 
 	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
 	if (max_nl_cursor == 0)
@@ -1398,7 +1398,7 @@ static int try_to_unmap_file(struct page
 			}
 			vma->vm_private_data = (void *) max_nl_cursor;
 		}
-		cond_resched_lock(&mapping->i_mmap_lock);
+		cond_resched();
 		max_nl_cursor += CLUSTER_SIZE;
 	} while (max_nl_cursor <= max_nl_size);
@@ -1410,7 +1410,7 @@ static int try_to_unmap_file(struct page
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list)
 		vma->vm_private_data = NULL;
 out:
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 	return ret;
 }
@@ -1525,7 +1525,7 @@ static int rmap_walk_file(struct page *p
 	if (!mapping)
 		return ret;
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 		if (address == -EFAULT)
			continue;
@@ -1539,7 +1539,7 @@ static int rmap_walk_file(struct page *p
 	 * never contain migration ptes. Decide what to do about this
 	 * limitation to linear when we need rmap_walk() on nonlinear.
 	 */
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_lock);
 	return ret;
 }
Index: linux-2.6/fs/gfs2/main.c
===================================================================
--- linux-2.6.orig/fs/gfs2/main.c
+++ linux-2.6/fs/gfs2/main.c
@@ -61,7 +61,7 @@ static void gfs2_init_gl_aspace_once(voi
 	memset(mapping, 0, sizeof(*mapping));
 	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
 	spin_lock_init(&mapping->tree_lock);
-	spin_lock_init(&mapping->i_mmap_lock);
+	mutex_init(&mapping->i_mmap_lock);
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
Index: linux-2.6/fs/nilfs2/btnode.c
===================================================================
--- linux-2.6.orig/fs/nilfs2/btnode.c
+++ linux-2.6/fs/nilfs2/btnode.c
@@ -43,7 +43,7 @@ void nilfs_btnode_cache_init_once(struct
 	INIT_LIST_HEAD(&btnc->private_list);
 	spin_lock_init(&btnc->private_lock);
 
-	spin_lock_init(&btnc->i_mmap_lock);
+	mutex_init(&btnc->i_mmap_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
 	INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
 }
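
The shape of the conversion is the same at almost every site in the diff:
the field type changes from spinlock_t to struct mutex, spin_lock_init()
becomes mutex_init(), and spin_lock()/spin_unlock() pairs become
mutex_lock()/mutex_unlock(). A minimal self-contained sketch of that
pattern on a made-up structure (demo_mapping and its functions are
illustrative only, not kernel code):

	#include <linux/mutex.h>
	#include <linux/list.h>

	/* hypothetical mapping-like object */
	struct demo_mapping {
		struct mutex	i_mmap_lock;	/* was: spinlock_t i_mmap_lock; */
		struct list_head vmas;		/* the data the lock protects */
	};

	static void demo_mapping_init(struct demo_mapping *m)
	{
		mutex_init(&m->i_mmap_lock);	/* was: spin_lock_init() */
		INIT_LIST_HEAD(&m->vmas);
	}

	static void demo_walk(struct demo_mapping *m)
	{
		mutex_lock(&m->i_mmap_lock);	/* was: spin_lock() */
		/*
		 * The critical section may now sleep, which is the point
		 * of the series: work done under i_mmap_lock and the
		 * anon_vma lock becomes preemptible.
		 */
		mutex_unlock(&m->i_mmap_lock);	/* was: spin_unlock() */
	}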
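
A few sites need more than a 1:1 replacement. spin_needbreak() has no
meaning for a mutex, so the lock-break tests in unmap_vmas() and
unmap_mapping_range_vma() switch to mutex_is_contended(), introduced by
patch 17/20 of this series; and the cond_resched_lock() calls in
try_to_unmap_file() become bare cond_resched(), since sleeping while
holding a mutex is legal. A hedged sketch of the resulting lock-break
idiom, reusing the made-up demo_mapping above (demo_scan_chunk() is a
hypothetical helper standing in for a unit of zap work):

	#include <linux/sched.h>

	/* hypothetical: returns nonzero while there is more to scan */
	static int demo_scan_chunk(struct demo_mapping *m)
	{
		return 0;	/* stub for illustration */
	}

	static void demo_scan(struct demo_mapping *m)
	{
		int more;

		mutex_lock(&m->i_mmap_lock);
		do {
			more = demo_scan_chunk(m);

			if (need_resched() ||
			    mutex_is_contended(&m->i_mmap_lock)) {
				/*
				 * Drop the lock so the waiter can get in.
				 * With a mutex a plain cond_resched() while
				 * holding it would also be legal -- something
				 * a spinlock never allowed.
				 */
				mutex_unlock(&m->i_mmap_lock);
				cond_resched();
				mutex_lock(&m->i_mmap_lock);
			}
		} while (more);
		mutex_unlock(&m->i_mmap_lock);
	}

Likewise, the spin_lock_nest_lock() annotations in mm_take_all_locks()
become mutex_lock_nest_lock() (from patch 16/20), preserving the lockdep
annotation that taking many same-class locks is safe because mm->mmap_sem
serializes them.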