* [PATCH] rmap 20 i_mmap_shared into i_mmap
@ 2004-05-04 22:18 Hugh Dickins
2004-05-04 22:19 ` [PATCH] rmap 21 try_to_unmap_one mapcount Hugh Dickins
` (2 more replies)
0 siblings, 3 replies; 8+ messages in thread
From: Hugh Dickins @ 2004-05-04 22:18 UTC (permalink / raw)
To: Andrew Morton
Cc: Martin J. Bligh, Russell King, James Bottomley, linux-kernel
First of a batch of four patches against 2.6.6-rc3-mm1.
rmap 20 i_mmap_shared into i_mmap
Why should struct address_space have separate i_mmap and i_mmap_shared
prio_trees (separating !VM_SHARED and VM_SHARED vmas)? No good reason,
the same processing is usually needed on both. Merge i_mmap_shared into
i_mmap, but keep i_mmap_writable count of VM_SHARED vmas (those capable
of dirtying the underlying file) for the mapping_writably_mapped test.
The VM_MAYSHARE test in the arm and parisc loops is not necessarily what
they will want to use in the end: it's provided as a harmless example of
what might be appropriate, but maintainers are likely to revise it later
(that parisc loop is currently being changed in the parisc tree anyway).
On the way, remove the now out-of-date comments on vm_area_struct size.
Documentation/cachetlb.txt | 8 ++++----
arch/arm/mm/fault-armv.c | 8 ++++++--
arch/parisc/kernel/cache.c | 23 ++---------------------
fs/hugetlbfs/inode.c | 4 +---
fs/inode.c | 1 -
include/linux/fs.h | 12 +++++-------
include/linux/mm.h | 11 +++--------
mm/fremap.c | 2 +-
mm/memory.c | 10 ++--------
mm/mmap.c | 22 ++++++++++------------
mm/prio_tree.c | 6 +++---
mm/rmap.c | 26 --------------------------
12 files changed, 37 insertions(+), 96 deletions(-)
--- 2.6.6-rc3-mm1/Documentation/cachetlb.txt 2003-10-08 20:24:56.000000000 +0100
+++ rmap20/Documentation/cachetlb.txt 2004-05-04 21:21:28.882451984 +0100
@@ -322,10 +322,10 @@ maps this page at its virtual address.
about doing this.
The idea is, first at flush_dcache_page() time, if
- page->mapping->i_mmap{,_shared} are empty lists, just mark the
- architecture private page flag bit. Later, in
- update_mmu_cache(), a check is made of this flag bit, and if
- set the flush is done and the flag bit is cleared.
+ page->mapping->i_mmap is an empty tree and ->i_mmap_nonlinear
+ an empty list, just mark the architecture private page flag bit.
+ Later, in update_mmu_cache(), a check is made of this flag bit,
+ and if set the flush is done and the flag bit is cleared.
IMPORTANT NOTE: It is often important, if you defer the flush,
that the actual flush occurs on the same CPU
--- 2.6.6-rc3-mm1/arch/arm/mm/fault-armv.c 2004-04-30 11:58:41.000000000 +0100
+++ rmap20/arch/arm/mm/fault-armv.c 2004-05-04 21:21:28.883451832 +0100
@@ -94,13 +94,15 @@ void __flush_dcache_page(struct page *pa
* and invalidate any user data.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap_shared,
+ while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
* If this VMA is not in our MM, we can ignore it.
*/
if (mpnt->vm_mm != mm)
continue;
+ if (!(mpnt->vm_flags & VM_MAYSHARE))
+ continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset);
}
@@ -127,7 +129,7 @@ make_coherent(struct vm_area_struct *vma
* space, then we need to handle them specially to maintain
* cache coherency.
*/
- while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap_shared,
+ while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
* If this VMA is not in our MM, we can ignore it.
@@ -136,6 +138,8 @@ make_coherent(struct vm_area_struct *vma
*/
if (mpnt->vm_mm != mm || mpnt == vma)
continue;
+ if (!(mpnt->vm_flags & VM_MAYSHARE))
+ continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
}
--- 2.6.6-rc3-mm1/arch/parisc/kernel/cache.c 2004-04-30 11:58:43.000000000 +0100
+++ rmap20/arch/parisc/kernel/cache.c 2004-05-04 21:21:28.883451832 +0100
@@ -244,33 +244,14 @@ void __flush_dcache_page(struct page *pa
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- /* check shared list first if it's not empty...it's usually
- * the shortest */
- while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap_shared,
+ while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
* If this VMA is not in our MM, we can ignore it.
*/
if (mpnt->vm_mm != mm)
continue;
-
- offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- flush_cache_page(mpnt, mpnt->vm_start + offset);
-
- /* All user shared mappings should be equivalently mapped,
- * so once we've flushed one we should be ok
- */
- return;
- }
-
- /* then check private mapping list for read only shared mappings
- * which are flagged by VM_MAYSHARE */
- while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
- &iter, pgoff, pgoff)) != NULL) {
- /*
- * If this VMA is not in our MM, we can ignore it.
- */
- if (mpnt->vm_mm != mm || !(mpnt->vm_flags & VM_MAYSHARE))
+ if (!(mpnt->vm_flags & VM_MAYSHARE))
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
--- 2.6.6-rc3-mm1/fs/hugetlbfs/inode.c 2004-04-30 11:58:45.000000000 +0100
+++ rmap20/fs/hugetlbfs/inode.c 2004-05-04 21:21:28.885451528 +0100
@@ -266,7 +266,7 @@ static void hugetlbfs_drop_inode(struct
* h_pgoff is in HPAGE_SIZE units.
* vma->vm_pgoff is in PAGE_SIZE units.
*/
-static void
+static inline void
hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
{
struct vm_area_struct *vma = NULL;
@@ -312,8 +312,6 @@ static int hugetlb_vmtruncate(struct ino
spin_lock(&mapping->i_shared_lock);
if (!prio_tree_empty(&mapping->i_mmap))
hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
- if (!prio_tree_empty(&mapping->i_mmap_shared))
- hugetlb_vmtruncate_list(&mapping->i_mmap_shared, pgoff);
spin_unlock(&mapping->i_shared_lock);
truncate_hugepages(mapping, offset);
return 0;
--- 2.6.6-rc3-mm1/fs/inode.c 2004-04-30 11:58:45.000000000 +0100
+++ rmap20/fs/inode.c 2004-05-04 21:21:28.886451376 +0100
@@ -189,7 +189,6 @@ void inode_init_once(struct inode *inode
INIT_LIST_HEAD(&inode->i_data.private_list);
spin_lock_init(&inode->i_data.private_lock);
INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
- INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap_shared);
INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
spin_lock_init(&inode->i_lock);
i_size_ordered_init(inode);
--- 2.6.6-rc3-mm1/include/linux/fs.h 2004-04-30 11:58:46.000000000 +0100
+++ rmap20/include/linux/fs.h 2004-05-04 21:21:28.888451072 +0100
@@ -331,9 +331,9 @@ struct address_space {
pgoff_t writeback_index;/* writeback starts here */
struct address_space_operations *a_ops; /* methods */
struct prio_tree_root i_mmap; /* tree of private mappings */
- struct prio_tree_root i_mmap_shared; /* tree of shared mappings */
- struct list_head i_mmap_nonlinear;/*list of nonlinear mappings */
- spinlock_t i_shared_lock; /* protect trees & list above */
+ unsigned int i_mmap_writable;/* count VM_SHARED mappings */
+ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
+ spinlock_t i_shared_lock; /* protect tree, count, list */
atomic_t truncate_count; /* Cover race condition with truncate */
unsigned long flags; /* error bits/gfp mask */
struct backing_dev_info *backing_dev_info; /* device readahead, etc */
@@ -382,20 +382,18 @@ int mapping_tagged(struct address_space
static inline int mapping_mapped(struct address_space *mapping)
{
return !prio_tree_empty(&mapping->i_mmap) ||
- !prio_tree_empty(&mapping->i_mmap_shared) ||
!list_empty(&mapping->i_mmap_nonlinear);
}
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_shared holds all the VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*/
static inline int mapping_writably_mapped(struct address_space *mapping)
{
- return !prio_tree_empty(&mapping->i_mmap_shared) ||
- !list_empty(&mapping->i_mmap_nonlinear);
+ return mapping->i_mmap_writable != 0;
}
/*
--- 2.6.6-rc3-mm1/include/linux/mm.h 2004-04-30 11:58:46.000000000 +0100
+++ rmap20/include/linux/mm.h 2004-05-04 21:21:28.890450768 +0100
@@ -46,13 +46,6 @@ extern int page_cluster;
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
- *
- * This structure is exactly 64 bytes on ia32. Please think very, very hard
- * before adding anything to it.
- * [Now 4 bytes more on 32bit NUMA machines. Sorry. -AK.
- * But if you want to recover the 4 bytes justr remove vm_next. It is redundant
- * with vm_rb. Will be a lot of editing work though. vm_rb.color is redundant
- * too.]
*/
struct vm_area_struct {
struct mm_struct * vm_mm; /* The address space we belong to. */
@@ -70,7 +63,9 @@ struct vm_area_struct {
/*
* For areas with an address space and backing store,
- * one of the address_space->i_mmap{,shared} trees.
+ * linkage into the address_space->i_mmap prio tree, or
+ * linkage to the list of like vmas hanging off its node, or
+ * linkage of vma in the address_space->i_mmap_nonlinear list.
*/
union {
struct {
--- 2.6.6-rc3-mm1/mm/fremap.c 2004-04-30 11:58:47.000000000 +0100
+++ rmap20/mm/fremap.c 2004-05-04 21:21:28.890450768 +0100
@@ -203,7 +203,7 @@ asmlinkage long sys_remap_file_pages(uns
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_shared_lock);
vma->vm_flags |= VM_NONLINEAR;
- vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
+ vma_prio_tree_remove(vma, &mapping->i_mmap);
vma_prio_tree_init(vma);
list_add_tail(&vma->shared.vm_set.list,
&mapping->i_mmap_nonlinear);
--- 2.6.6-rc3-mm1/mm/memory.c 2004-04-30 11:58:47.000000000 +0100
+++ rmap20/mm/memory.c 2004-05-04 21:21:28.892450464 +0100
@@ -1118,8 +1118,8 @@ no_new_page:
/*
* Helper function for unmap_mapping_range().
*/
-static void unmap_mapping_range_list(struct prio_tree_root *root,
- struct zap_details *details)
+static inline void unmap_mapping_range_list(struct prio_tree_root *root,
+ struct zap_details *details)
{
struct vm_area_struct *vma = NULL;
struct prio_tree_iter iter;
@@ -1188,12 +1188,6 @@ void unmap_mapping_range(struct address_
if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
unmap_mapping_range_list(&mapping->i_mmap, &details);
- /* Don't waste time to check mapping on fully shared vmas */
- details.check_mapping = NULL;
-
- if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared)))
- unmap_mapping_range_list(&mapping->i_mmap_shared, &details);
-
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
struct vm_area_struct *vma;
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
--- 2.6.6-rc3-mm1/mm/mmap.c 2004-04-30 11:58:47.000000000 +0100
+++ rmap20/mm/mmap.c 2004-05-04 21:21:28.894450160 +0100
@@ -69,11 +69,11 @@ static inline void __remove_shared_vm_st
{
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
+ if (vma->vm_flags & VM_SHARED)
+ mapping->i_mmap_writable--;
if (unlikely(vma->vm_flags & VM_NONLINEAR))
list_del_init(&vma->shared.vm_set.list);
- else if (vma->vm_flags & VM_SHARED)
- vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
else
vma_prio_tree_remove(vma, &mapping->i_mmap);
}
@@ -261,12 +261,12 @@ static inline void __vma_link_file(struc
if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&file->f_dentry->d_inode->i_writecount);
+ if (vma->vm_flags & VM_SHARED)
+ mapping->i_mmap_writable++;
if (unlikely(vma->vm_flags & VM_NONLINEAR))
list_add_tail(&vma->shared.vm_set.list,
&mapping->i_mmap_nonlinear);
- else if (vma->vm_flags & VM_SHARED)
- vma_prio_tree_insert(vma, &mapping->i_mmap_shared);
else
vma_prio_tree_insert(vma, &mapping->i_mmap);
}
@@ -306,8 +306,8 @@ static void vma_link(struct mm_struct *m
}
/*
- * Insert vm structure into process list sorted by address and into the inode's
- * i_mmap ring. The caller should hold mm->page_table_lock and
+ * Insert vm structure into process list sorted by address and into the
+ * inode's i_mmap tree. The caller should hold mm->page_table_lock and
* ->f_mappping->i_shared_lock if vm_file is non-NULL.
*/
static void
@@ -326,8 +326,8 @@ __insert_vm_struct(struct mm_struct * mm
}
/*
- * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that is
- * already present in an i_mmap{_shared} tree without adjusting the tree.
+ * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
+ * is already present in an i_mmap tree without adjusting the tree.
* The following helper function should be used when such adjustments
* are necessary. The "next" vma (if any) is to be removed or inserted
* before we drop the necessary locks.
@@ -342,10 +342,8 @@ void vma_adjust(struct vm_area_struct *v
if (file) {
mapping = file->f_mapping;
- if (!(vma->vm_flags & VM_SHARED))
+ if (!(vma->vm_flags & VM_NONLINEAR))
root = &mapping->i_mmap;
- else if (!(vma->vm_flags & VM_NONLINEAR))
- root = &mapping->i_mmap_shared;
spin_lock(&mapping->i_shared_lock);
}
spin_lock(&mm->page_table_lock);
@@ -1513,7 +1511,7 @@ void exit_mmap(struct mm_struct *mm)
}
/* Insert vm structure into process list sorted by address
- * and into the inode's i_mmap ring. If vm_file is non-NULL
+ * and into the inode's i_mmap tree. If vm_file is non-NULL
* then i_shared_lock is taken here.
*/
void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
--- 2.6.6-rc3-mm1/mm/prio_tree.c 2004-04-30 11:58:47.000000000 +0100
+++ rmap20/mm/prio_tree.c 2004-05-04 21:21:28.895450008 +0100
@@ -1,5 +1,5 @@
/*
- * mm/prio_tree.c - priority search tree for mapping->i_mmap{,_shared}
+ * mm/prio_tree.c - priority search tree for mapping->i_mmap
*
* Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
*
@@ -41,7 +41,7 @@
*/
/*
- * The following macros are used for implementing prio_tree for i_mmap{_shared}
+ * The following macros are used for implementing prio_tree for i_mmap
*/
#define RADIX_INDEX(vma) ((vma)->vm_pgoff)
@@ -491,7 +491,7 @@ repeat:
}
/*
- * Radix priority search tree for address_space->i_mmap_{_shared}
+ * Radix priority search tree for address_space->i_mmap
*
* For each vma that map a unique set of file pages i.e., unique [radix_index,
* heap_index] value, we have a corresponing priority search tree node. If
--- 2.6.6-rc3-mm1/mm/rmap.c 2004-04-30 11:58:47.000000000 +0100
+++ rmap20/mm/rmap.c 2004-05-04 21:21:28.897449704 +0100
@@ -331,21 +331,6 @@ static inline int page_referenced_file(s
}
}
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, pgoff, pgoff)) != NULL) {
- if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) {
- referenced++;
- goto out;
- }
- if (vma->vm_mm->rss) {
- address = vma_address(vma, pgoff);
- referenced += page_referenced_one(page,
- vma->vm_mm, address, &mapcount, &failed);
- if (!mapcount)
- goto out;
- }
- }
-
if (list_empty(&mapping->i_mmap_nonlinear))
WARN_ON(!failed);
out:
@@ -734,17 +719,6 @@ static inline int try_to_unmap_file(stru
}
}
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, pgoff, pgoff)) != NULL) {
- if (vma->vm_mm->rss) {
- address = vma_address(vma, pgoff);
- ret = try_to_unmap_one(page,
- vma->vm_mm, address, &mapcount, vma);
- if (ret == SWAP_FAIL || !mapcount)
- goto out;
- }
- }
-
if (list_empty(&mapping->i_mmap_nonlinear))
goto out;
^ permalink raw reply [flat|nested] 8+ messages in thread* [PATCH] rmap 21 try_to_unmap_one mapcount
2004-05-04 22:18 [PATCH] rmap 20 i_mmap_shared into i_mmap Hugh Dickins
@ 2004-05-04 22:19 ` Hugh Dickins
2004-05-04 22:22 ` [PATCH] rmap 22 flush_dcache_mmap_lock Hugh Dickins
2004-05-04 22:28 ` [PATCH] rmap 23 empty flush_dcache_mmap_lock Hugh Dickins
2 siblings, 0 replies; 8+ messages in thread
From: Hugh Dickins @ 2004-05-04 22:19 UTC (permalink / raw)
To: Andrew Morton; +Cc: Martin J. Bligh, linux-kernel
Why should try_to_unmap_anon and try_to_unmap_file take a copy of
page->mapcount and pass it down for try_to_unmap_one to decrement?
Why not just check page->mapcount itself? asks akpm. Perhaps there
used to be a good reason, but not any more: remove the mapcount arg.
mm/rmap.c | 26 ++++++++++----------------
1 files changed, 10 insertions(+), 16 deletions(-)
--- rmap20/mm/rmap.c 2004-05-04 21:21:28.897449704 +0100
+++ rmap21/mm/rmap.c 2004-05-04 21:21:39.928772688 +0100
@@ -466,9 +466,8 @@ int fastcall mremap_move_anon_rmap(struc
** repeatedly from either try_to_unmap_anon or try_to_unmap_file.
**/
-static int try_to_unmap_one(struct page *page,
- struct mm_struct *mm, unsigned long address,
- unsigned int *mapcount, struct vm_area_struct *vma)
+static int try_to_unmap_one(struct page *page, struct mm_struct *mm,
+ unsigned long address, struct vm_area_struct *vma)
{
pgd_t *pgd;
pmd_t *pmd;
@@ -498,8 +497,6 @@ static int try_to_unmap_one(struct page
if (page_to_pfn(page) != pte_pfn(*pte))
goto out_unmap;
- (*mapcount)--;
-
if (!vma) {
vma = find_vma(mm, address);
/* unmap_vmas drops page_table_lock with vma unlinked */
@@ -643,7 +640,6 @@ out_unlock:
static inline int try_to_unmap_anon(struct page *page)
{
- unsigned int mapcount = page->mapcount;
struct anonmm *anonmm = (struct anonmm *) page->mapping;
struct anonmm *anonhd = anonmm->head;
struct list_head *seek_head;
@@ -654,9 +650,8 @@ static inline int try_to_unmap_anon(stru
* First try the indicated mm, it's the most likely.
*/
if (anonmm->mm && anonmm->mm->rss) {
- ret = try_to_unmap_one(page,
- anonmm->mm, page->index, &mapcount, NULL);
- if (ret == SWAP_FAIL || !mapcount)
+ ret = try_to_unmap_one(page, anonmm->mm, page->index, NULL);
+ if (ret == SWAP_FAIL || !page->mapcount)
goto out;
}
@@ -670,9 +665,8 @@ static inline int try_to_unmap_anon(stru
list_for_each_entry(anonmm, seek_head, list) {
if (!anonmm->mm || !anonmm->mm->rss)
continue;
- ret = try_to_unmap_one(page,
- anonmm->mm, page->index, &mapcount, NULL);
- if (ret == SWAP_FAIL || !mapcount)
+ ret = try_to_unmap_one(page, anonmm->mm, page->index, NULL);
+ if (ret == SWAP_FAIL || !page->mapcount)
goto out;
}
out:
@@ -694,7 +688,6 @@ out:
*/
static inline int try_to_unmap_file(struct page *page)
{
- unsigned int mapcount = page->mapcount;
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct vm_area_struct *vma = NULL;
@@ -704,6 +697,7 @@ static inline int try_to_unmap_file(stru
unsigned long cursor;
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
+ unsigned int mapcount;
if (!spin_trylock(&mapping->i_shared_lock))
return ret;
@@ -712,9 +706,8 @@ static inline int try_to_unmap_file(stru
&iter, pgoff, pgoff)) != NULL) {
if (vma->vm_mm->rss) {
address = vma_address(vma, pgoff);
- ret = try_to_unmap_one(page,
- vma->vm_mm, address, &mapcount, vma);
- if (ret == SWAP_FAIL || !mapcount)
+ ret = try_to_unmap_one(page, vma->vm_mm, address, vma);
+ if (ret == SWAP_FAIL || !page->mapcount)
goto out;
}
}
@@ -744,6 +737,7 @@ static inline int try_to_unmap_file(stru
* The mapcount of the page we came in with is irrelevant,
* but even so use it as a guide to how hard we should try?
*/
+ mapcount = page->mapcount;
rmap_unlock(page);
cond_resched_lock(&mapping->i_shared_lock);
^ permalink raw reply [flat|nested] 8+ messages in thread* [PATCH] rmap 22 flush_dcache_mmap_lock
2004-05-04 22:18 [PATCH] rmap 20 i_mmap_shared into i_mmap Hugh Dickins
2004-05-04 22:19 ` [PATCH] rmap 21 try_to_unmap_one mapcount Hugh Dickins
@ 2004-05-04 22:22 ` Hugh Dickins
2004-05-04 22:40 ` Andrew Morton
2004-05-04 22:53 ` James Bottomley
2004-05-04 22:28 ` [PATCH] rmap 23 empty flush_dcache_mmap_lock Hugh Dickins
2 siblings, 2 replies; 8+ messages in thread
From: Hugh Dickins @ 2004-05-04 22:22 UTC (permalink / raw)
To: Andrew Morton
Cc: Martin J. Bligh, Russell King, James Bottomley, linux-kernel
arm and parisc __flush_dcache_page have been scanning the i_mmap(_shared)
list without locking or disabling preemption. That may be even more
unsafe now it's a prio tree instead of a list.
It looks like we cannot use i_shared_lock for this protection: most uses
of flush_dcache_page are okay, and only one would need lock ordering
fixed (get_user_pages holds page_table_lock across flush_dcache_page);
but there's a few (e.g. in net and ntfs) which look as if they're using
it in I/O completion - and it would be restrictive to disallow it there.
So, on arm and parisc only, define flush_dcache_mmap_lock(mapping) as
spin_lock_irq(&(mapping)->tree_lock); on i386 (and other arches left
to the next patch) define it away to nothing; and use where needed.
While updating locking hierarchy in filemap.c, remove two layers of the
fossil record from add_to_page_cache comment: no longer used for swap.
I believe all the #includes will work out, but have only built i386.
I can see several things about this patch which might cause revulsion:
the name flush_dcache_mmap_lock? the reuse of the page radix_tree's
tree_lock for this different purpose? spin_lock_irqsave instead?
can't we somehow get i_shared_lock to handle the problem?
arch/arm/mm/fault-armv.c | 5 +++++
arch/parisc/kernel/cache.c | 2 ++
include/asm-arm/cacheflush.h | 5 +++++
include/asm-i386/cacheflush.h | 2 ++
include/asm-parisc/cacheflush.h | 5 +++++
kernel/fork.c | 2 ++
mm/filemap.c | 10 +++-------
mm/fremap.c | 2 ++
mm/mmap.c | 10 +++++++++-
9 files changed, 35 insertions(+), 8 deletions(-)
--- rmap21/arch/arm/mm/fault-armv.c 2004-05-04 21:21:28.883451832 +0100
+++ rmap22/arch/arm/mm/fault-armv.c 2004-05-04 21:21:50.954096584 +0100
@@ -94,6 +94,8 @@ void __flush_dcache_page(struct page *pa
* and invalidate any user data.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
@@ -106,6 +108,7 @@ void __flush_dcache_page(struct page *pa
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset);
}
+ flush_dcache_mmap_unlock(mapping);
}
static void
@@ -129,6 +132,7 @@ make_coherent(struct vm_area_struct *vma
* space, then we need to handle them specially to maintain
* cache coherency.
*/
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
@@ -143,6 +147,7 @@ make_coherent(struct vm_area_struct *vma
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
}
+ flush_dcache_mmap_unlock(mapping);
if (aliases)
adjust_pte(vma, addr);
else
--- rmap21/arch/parisc/kernel/cache.c 2004-05-04 21:21:28.883451832 +0100
+++ rmap22/arch/parisc/kernel/cache.c 2004-05-04 21:21:50.955096432 +0100
@@ -244,6 +244,7 @@ void __flush_dcache_page(struct page *pa
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
@@ -262,6 +263,7 @@ void __flush_dcache_page(struct page *pa
*/
break;
}
+ flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(__flush_dcache_page);
--- rmap21/include/asm-arm/cacheflush.h 2004-04-28 07:07:11.000000000 +0100
+++ rmap22/include/asm-arm/cacheflush.h 2004-05-04 21:21:50.956096280 +0100
@@ -303,6 +303,11 @@ static inline void flush_dcache_page(str
__flush_dcache_page(page);
}
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
--- rmap21/include/asm-i386/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
+++ rmap22/include/asm-i386/cacheflush.h 2004-05-04 21:21:50.956096280 +0100
@@ -10,6 +10,8 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
--- rmap21/include/asm-parisc/cacheflush.h 2004-04-28 07:07:13.000000000 +0100
+++ rmap22/include/asm-parisc/cacheflush.h 2004-05-04 21:21:50.957096128 +0100
@@ -78,6 +78,11 @@ static inline void flush_dcache_page(str
}
}
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
--- rmap21/kernel/fork.c 2004-04-30 11:58:46.000000000 +0100
+++ rmap22/kernel/fork.c 2004-05-04 21:21:50.958095976 +0100
@@ -331,7 +331,9 @@ static inline int dup_mmap(struct mm_str
/* insert tmp into the share list, just after mpnt */
spin_lock(&file->f_mapping->i_shared_lock);
+ flush_dcache_mmap_lock(mapping);
vma_prio_tree_add(tmp, mpnt);
+ flush_dcache_mmap_unlock(mapping);
spin_unlock(&file->f_mapping->i_shared_lock);
}
--- rmap21/mm/filemap.c 2004-04-30 11:58:47.000000000 +0100
+++ rmap22/mm/filemap.c 2004-05-04 21:21:50.960095672 +0100
@@ -65,7 +65,9 @@
* ->i_shared_lock (truncate->unmap_mapping_range)
*
* ->mmap_sem
- * ->i_shared_lock (various places)
+ * ->i_shared_lock
+ * ->page_table_lock (various places, mainly in mmap.c)
+ * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
*
* ->mmap_sem
* ->lock_page (access_process_vm)
@@ -240,12 +242,6 @@ int filemap_write_and_wait(struct addres
}
/*
- * This adds a page to the page cache, starting out as locked, unreferenced,
- * not uptodate and with no errors.
- *
- * This function is used for two things: adding newly allocated pagecache
- * pages and for moving existing anon pages into swapcache.
- *
* This function is used to add newly allocated pagecache pages:
* the page is new, so we can just run SetPageLocked() against it.
* The other page state flags were set by rmqueue().
--- rmap21/mm/fremap.c 2004-05-04 21:21:28.890450768 +0100
+++ rmap22/mm/fremap.c 2004-05-04 21:21:50.961095520 +0100
@@ -202,11 +202,13 @@ asmlinkage long sys_remap_file_pages(uns
!(vma->vm_flags & VM_NONLINEAR)) {
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_shared_lock);
+ flush_dcache_mmap_lock(mapping);
vma->vm_flags |= VM_NONLINEAR;
vma_prio_tree_remove(vma, &mapping->i_mmap);
vma_prio_tree_init(vma);
list_add_tail(&vma->shared.vm_set.list,
&mapping->i_mmap_nonlinear);
+ flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_shared_lock);
}
--- rmap21/mm/mmap.c 2004-05-04 21:21:28.894450160 +0100
+++ rmap22/mm/mmap.c 2004-05-04 21:21:50.963095216 +0100
@@ -23,6 +23,7 @@
#include <linux/mount.h>
#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
#include <asm/tlb.h>
/*
@@ -72,10 +73,12 @@ static inline void __remove_shared_vm_st
if (vma->vm_flags & VM_SHARED)
mapping->i_mmap_writable--;
+ flush_dcache_mmap_lock(mapping);
if (unlikely(vma->vm_flags & VM_NONLINEAR))
list_del_init(&vma->shared.vm_set.list);
else
vma_prio_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
}
/*
@@ -264,11 +267,13 @@ static inline void __vma_link_file(struc
if (vma->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
+ flush_dcache_mmap_lock(mapping);
if (unlikely(vma->vm_flags & VM_NONLINEAR))
list_add_tail(&vma->shared.vm_set.list,
&mapping->i_mmap_nonlinear);
else
vma_prio_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
}
}
@@ -348,14 +353,17 @@ void vma_adjust(struct vm_area_struct *v
}
spin_lock(&mm->page_table_lock);
- if (root)
+ if (root) {
+ flush_dcache_mmap_lock(mapping);
vma_prio_tree_remove(vma, root);
+ }
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
if (root) {
vma_prio_tree_init(vma);
vma_prio_tree_insert(vma, root);
+ flush_dcache_mmap_unlock(mapping);
}
if (next) {
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH] rmap 22 flush_dcache_mmap_lock
2004-05-04 22:22 ` [PATCH] rmap 22 flush_dcache_mmap_lock Hugh Dickins
@ 2004-05-04 22:40 ` Andrew Morton
2004-05-05 0:12 ` Hugh Dickins
2004-05-04 22:53 ` James Bottomley
1 sibling, 1 reply; 8+ messages in thread
From: Andrew Morton @ 2004-05-04 22:40 UTC (permalink / raw)
To: Hugh Dickins; +Cc: mbligh, rmk, James.Bottomley, linux-kernel
Hugh Dickins <hugh@veritas.com> wrote:
>
> --- rmap21/include/asm-i386/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
> +++ rmap22/include/asm-i386/cacheflush.h 2004-05-04 21:21:50.956096280 +0100
> @@ -10,6 +10,8 @@
> #define flush_cache_range(vma, start, end) do { } while (0)
> #define flush_cache_page(vma, vmaddr) do { } while (0)
> #define flush_dcache_page(page) do { } while (0)
> +#define flush_dcache_mmap_lock(mapping) do { } while (0)
> +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
Looks like this patch will break a lot of architectures. Was that
intentional?
If not, and if you expect that all other architectures do not need the lock
then the above could be cast as:
#ifndef flush_dcache_mmap_lock
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#endif
in some generic file.
wrt overloading of tree_lock: The main drawback is that the VM lock ranking
is now dependent upon the architecture. That, plus the dang thing is
undocumented!
And it seems strange to be grabbing that lock expecting that it will
protect the tree which is elsewhere protected by a different lock. You
sure this is correct?
I wonder if it wouldn't be better to simply make i_shared_lock irq-safe?
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH] rmap 22 flush_dcache_mmap_lock
2004-05-04 22:40 ` Andrew Morton
@ 2004-05-05 0:12 ` Hugh Dickins
0 siblings, 0 replies; 8+ messages in thread
From: Hugh Dickins @ 2004-05-05 0:12 UTC (permalink / raw)
To: Andrew Morton; +Cc: mbligh, rmk, James.Bottomley, linux-kernel
On Tue, 4 May 2004, Andrew Morton wrote:
>
> Looks like this patch will break a lot of architectures. Was that
> intentional?
Didn't you get the next patch, rmap 23? That does the other arches.
> If not, and if you expect that all other architectures do not need the lock
> then the above could be cast as:
>
> #ifndef flush_dcache_mmap_lock
> #define flush_dcache_mmap_lock(mapping) do { } while (0)
> #define flush_dcache_mmap_unlock(mapping) do { } while (0)
> #endif
>
> in some generic file.
I was very tempted to do so, but it's not the style for flush_dcache_page
& friends, so I updated each asm/cacheflush.h (well, sh goes down a level).
> wrt overloading of tree_lock: The main drawback is that the VM lock ranking
> is now dependent upon the architecture. That, plus the dang thing is
> undocumented!
Not really: there's an extra level at the bottom for two of the arches.
Documented with the rest in filemap.c (though I didn't commit to which
arches there).
> And it seems strange to be grabbing that lock expecting that it will
> protect the tree which is elsewhere protected by a different lock. You
> sure this is correct?
I most certainly agree it isn't pretty, I'd gladly solve it a
better way - if you or someone else can tell me that better way.
Obviously we could add another spinlock rather than reusing tree_lock,
but I don't think that'll help much: still regrettable.
> I wonder if it wouldn't be better to simply make i_shared_lock irq-safe?
A sixth sense tells me that we don't want to disable interrupts
throughout vmtruncate's unmapping of pages from vmas...
But perhaps there's another way of looking at the problem.
Hugh
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH] rmap 22 flush_dcache_mmap_lock
2004-05-04 22:22 ` [PATCH] rmap 22 flush_dcache_mmap_lock Hugh Dickins
2004-05-04 22:40 ` Andrew Morton
@ 2004-05-04 22:53 ` James Bottomley
2004-05-05 0:29 ` Hugh Dickins
1 sibling, 1 reply; 8+ messages in thread
From: James Bottomley @ 2004-05-04 22:53 UTC (permalink / raw)
To: Hugh Dickins; +Cc: Andrew Morton, Martin J. Bligh, Russell King, Linux Kernel
On Tue, 2004-05-04 at 17:22, Hugh Dickins wrote:
> arm and parisc __flush_dcache_page have been scanning the i_mmap(_shared)
> list without locking or disabling preemption. That may be even more
> unsafe now it's a prio tree instead of a list.
>
> It looks like we cannot use i_shared_lock for this protection: most uses
> of flush_dcache_page are okay, and only one would need lock ordering
> fixed (get_user_pages holds page_table_lock across flush_dcache_page);
> but there's a few (e.g. in net and ntfs) which look as if they're using
> it in I/O completion - and it would be restrictive to disallow it there.
>
> So, on arm and parisc only, define flush_dcache_mmap_lock(mapping) as
> spin_lock_irq(&(mapping)->tree_lock); on i386 (and other arches left
> to the next patch) define it away to nothing; and use where needed.
>
> While updating locking hierarchy in filemap.c, remove two layers of the
> fossil record from add_to_page_cache comment: no longer used for swap.
>
> I believe all the #includes will work out, but have only built i386.
> I can see several things about this patch which might cause revulsion:
> the name flush_dcache_mmap_lock? the reuse of the page radix_tree's
> tree_lock for this different purpose? spin_lock_irqsave instead?
> can't we somehow get i_shared_lock to handle the problem?
Hugh,
I thought in a prior discussion with Andrea that there was a generic VM
i_mmap loop that can take rather a long time, and thus we didn't want a
spinlock for this, but a rwlock. Since our critical regions in the
cache flushing are read only, only i_mmap updates (which are short
critical regions) take the write lock with irqsave, all the rest take
the shared read lock with irq.
Unless you've eliminated this long scan from the generic VM, I think the
idea is still better than a simple spinlock.
James
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH] rmap 22 flush_dcache_mmap_lock
2004-05-04 22:53 ` James Bottomley
@ 2004-05-05 0:29 ` Hugh Dickins
0 siblings, 0 replies; 8+ messages in thread
From: Hugh Dickins @ 2004-05-05 0:29 UTC (permalink / raw)
To: James Bottomley
Cc: Andrew Morton, Martin J. Bligh, Russell King, Linux Kernel
On 4 May 2004, James Bottomley wrote:
>
> I thought in a prior discussion with Andrea that there was a generic VM
> i_mmap loop that can take rather a long time, and thus we didn't want a
There is indeed, that's vmtruncate truncating pages out of all the
vmas which might contain it. That loop is the one guarded by
i_shared_lock (i_shared_sem in 2.6.6 itself).
> spinlock for this, but a rwlock. Since our critical regions in the
> cache flushing are read only, only i_mmap updates (which are short
> critical regions) take the write lock with irqsave, all the rest take
> the shared read lock with irq.
That's why I'm using the separate low-level tree_lock in addition
for your flush_dcache_page: it's not held over all that vmtruncate,
just at those moments someone needs to alter the i_mmap tree itself
in some way, very brief - not while someone is working on vmas in
that tree. Yes, serializing you with the i_shared_lock would be
very bad news in some cases.
(I don't get your point about _irqsave for writes but _irq for
reads: do I need to do something like that? I don't see why.)
Using an rwlock would provide another solution; but I'm dubious of
that solution since the majority of traffic on that lock would be
"w"s (inserting and removing vmas) rather than "r"s (scanning tree) -
at least, I think that would be the case on the hot paths of other
architectures than parisc and arm. I'm guessing efficiency is
targeted at the opposite, "r"s much more common than "w"s.
> Unless you've eliminated this long scan from the generic VM, I think the
> idea is still better than a simple spinlock.
The long scan is not eliminated, but the spinlock I'm proposing
in this patch is (by design) quite independent of that - you can
flush_dcache_page to your heart's content while vmtruncate is in
progress, it wouldn't be locking it out at all.
Hugh
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH] rmap 23 empty flush_dcache_mmap_lock
2004-05-04 22:18 [PATCH] rmap 20 i_mmap_shared into i_mmap Hugh Dickins
2004-05-04 22:19 ` [PATCH] rmap 21 try_to_unmap_one mapcount Hugh Dickins
2004-05-04 22:22 ` [PATCH] rmap 22 flush_dcache_mmap_lock Hugh Dickins
@ 2004-05-04 22:28 ` Hugh Dickins
2 siblings, 0 replies; 8+ messages in thread
From: Hugh Dickins @ 2004-05-04 22:28 UTC (permalink / raw)
To: Andrew Morton
Cc: Martin J. Bligh, rth, spyro, bjornw, ysato, davidm, jes, gerg,
ralf, paulus, anton, schwidefsky, lethal, wesolows, davem, miles,
ak, linux-kernel
Most architectures (like i386) do nothing in flush_dcache_page, or don't
scan i_mmap in flush_dcache_page, so don't need flush_dcache_mmap_lock
to do anything: define it and flush_dcache_mmap_unlock away. Noticed
arm26, cris, h8300 still defining flush_page_to_ram: delete it again.
include/asm-alpha/cacheflush.h | 2 ++
include/asm-arm26/cacheflush.h | 3 ++-
include/asm-cris/cacheflush.h | 3 ++-
include/asm-h8300/cacheflush.h | 3 ++-
include/asm-ia64/cacheflush.h | 3 +++
include/asm-m68k/cacheflush.h | 2 ++
include/asm-m68knommu/cacheflush.h | 2 ++
include/asm-mips/cacheflush.h | 3 +++
include/asm-ppc/cacheflush.h | 3 +++
include/asm-ppc64/cacheflush.h | 3 +++
include/asm-s390/cacheflush.h | 2 ++
include/asm-sh/cpu-sh2/cacheflush.h | 2 ++
include/asm-sh/cpu-sh3/cacheflush.h | 2 ++
include/asm-sh/cpu-sh4/cacheflush.h | 4 ++++
include/asm-sparc/cacheflush.h | 2 ++
include/asm-sparc64/cacheflush.h | 2 ++
include/asm-v850/cacheflush.h | 2 ++
include/asm-x86_64/cacheflush.h | 2 ++
18 files changed, 42 insertions(+), 3 deletions(-)
--- rmap22/include/asm-alpha/cacheflush.h 2003-10-08 20:24:57.000000000 +0100
+++ rmap23/include/asm-alpha/cacheflush.h 2004-05-04 21:22:02.003416832 +0100
@@ -10,6 +10,8 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
--- rmap22/include/asm-arm26/cacheflush.h 2003-10-08 20:24:57.000000000 +0100
+++ rmap23/include/asm-arm26/cacheflush.h 2004-05-04 21:22:02.004416680 +0100
@@ -24,7 +24,6 @@
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma,start,end) do { } while (0)
#define flush_cache_page(vma,vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
@@ -32,6 +31,8 @@
#define clean_dcache_range(start,end) do { } while (0)
#define flush_dcache_range(start,end) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define clean_dcache_entry(_s) do { } while (0)
#define clean_cache_entry(_start) do { } while (0)
--- rmap22/include/asm-cris/cacheflush.h 2003-10-08 20:24:55.000000000 +0100
+++ rmap23/include/asm-cris/cacheflush.h 2004-05-04 21:22:02.004416680 +0100
@@ -11,8 +11,9 @@
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
--- rmap22/include/asm-h8300/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
+++ rmap23/include/asm-h8300/cacheflush.h 2004-05-04 21:22:02.005416528 +0100
@@ -14,8 +14,9 @@
#define flush_cache_mm(mm)
#define flush_cache_range(vma,a,b)
#define flush_cache_page(vma,p)
-#define flush_page_to_ram(page)
#define flush_dcache_page(page)
+#define flush_dcache_mmap_lock(mapping)
+#define flush_dcache_mmap_unlock(mapping)
#define flush_icache()
#define flush_icache_page(vma,page)
#define flush_icache_range(start,len)
--- rmap22/include/asm-ia64/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
+++ rmap23/include/asm-ia64/cacheflush.h 2004-05-04 21:22:02.006416376 +0100
@@ -29,6 +29,9 @@ do { \
clear_bit(PG_arch_1, &(page)->flags); \
} while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
extern void flush_icache_range (unsigned long start, unsigned long end);
#define flush_icache_user_range(vma, page, user_addr, len) \
--- rmap22/include/asm-m68k/cacheflush.h 2004-02-04 02:45:30.000000000 +0000
+++ rmap23/include/asm-m68k/cacheflush.h 2004-05-04 21:22:02.007416224 +0100
@@ -128,6 +128,8 @@ static inline void __flush_page_to_ram(v
}
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
--- rmap22/include/asm-m68knommu/cacheflush.h 2003-10-08 20:24:57.000000000 +0100
+++ rmap23/include/asm-m68knommu/cacheflush.h 2004-05-04 21:22:02.007416224 +0100
@@ -12,6 +12,8 @@
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_range(start,len) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start,len) __flush_cache_all()
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
--- rmap22/include/asm-mips/cacheflush.h 2004-04-28 07:07:12.000000000 +0100
+++ rmap23/include/asm-mips/cacheflush.h 2004-05-04 21:22:02.008416072 +0100
@@ -45,6 +45,9 @@ static inline void flush_dcache_page(str
}
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
extern void (*flush_icache_page)(struct vm_area_struct *vma,
struct page *page);
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
--- rmap22/include/asm-ppc/cacheflush.h 2003-10-08 20:24:57.000000000 +0100
+++ rmap23/include/asm-ppc/cacheflush.h 2004-05-04 21:22:02.009415920 +0100
@@ -28,6 +28,9 @@
#define flush_cache_vunmap(start, end) do { } while (0)
extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
--- rmap22/include/asm-ppc64/cacheflush.h 2004-04-28 07:07:13.000000000 +0100
+++ rmap23/include/asm-ppc64/cacheflush.h 2004-05-04 21:22:02.010415768 +0100
@@ -18,6 +18,9 @@
#define flush_cache_vunmap(start, end) do { } while (0)
extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
extern void __flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
--- rmap22/include/asm-s390/cacheflush.h 2003-10-08 20:24:57.000000000 +0100
+++ rmap23/include/asm-s390/cacheflush.h 2004-05-04 21:22:02.010415768 +0100
@@ -10,6 +10,8 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
--- rmap22/include/asm-sh/cpu-sh2/cacheflush.h 2003-07-02 22:00:48.000000000 +0100
+++ rmap23/include/asm-sh/cpu-sh2/cacheflush.h 2004-05-04 21:22:02.011415616 +0100
@@ -30,6 +30,8 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
--- rmap22/include/asm-sh/cpu-sh3/cacheflush.h 2004-04-04 03:38:43.000000000 +0100
+++ rmap23/include/asm-sh/cpu-sh3/cacheflush.h 2004-05-04 21:22:02.012415464 +0100
@@ -30,6 +30,8 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
--- rmap22/include/asm-sh/cpu-sh4/cacheflush.h 2003-07-02 22:00:46.000000000 +0100
+++ rmap23/include/asm-sh/cpu-sh4/cacheflush.h 2004-05-04 21:22:02.013415312 +0100
@@ -30,6 +30,10 @@ extern void flush_cache_range(struct vm_
unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
extern void flush_dcache_page(struct page *pg);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_cache_sigtramp(unsigned long addr);
extern void flush_icache_user_range(struct vm_area_struct *vma,
--- rmap22/include/asm-sparc/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
+++ rmap23/include/asm-sparc/cacheflush.h 2004-05-04 21:22:02.013415312 +0100
@@ -70,6 +70,8 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, s
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
--- rmap22/include/asm-sparc64/cacheflush.h 2003-10-08 20:24:57.000000000 +0100
+++ rmap23/include/asm-sparc64/cacheflush.h 2004-05-04 21:22:02.014415160 +0100
@@ -42,6 +42,8 @@ extern void __flush_dcache_range(unsigne
memcpy(dst, src, len)
extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
--- rmap22/include/asm-v850/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
+++ rmap23/include/asm-v850/cacheflush.h 2004-05-04 21:22:02.015415008 +0100
@@ -27,6 +27,8 @@
#define flush_cache_range(vma, start, end) ((void)0)
#define flush_cache_page(vma, vmaddr) ((void)0)
#define flush_dcache_page(page) ((void)0)
+#define flush_dcache_mmap_lock(mapping) ((void)0)
+#define flush_dcache_mmap_unlock(mapping) ((void)0)
#define flush_cache_vmap(start, end) ((void)0)
#define flush_cache_vunmap(start, end) ((void)0)
--- rmap22/include/asm-x86_64/cacheflush.h 2003-10-08 20:24:56.000000000 +0100
+++ rmap23/include/asm-x86_64/cacheflush.h 2004-05-04 21:22:02.016414856 +0100
@@ -10,6 +10,8 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2004-05-05 0:29 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2004-05-04 22:18 [PATCH] rmap 20 i_mmap_shared into i_mmap Hugh Dickins
2004-05-04 22:19 ` [PATCH] rmap 21 try_to_unmap_one mapcount Hugh Dickins
2004-05-04 22:22 ` [PATCH] rmap 22 flush_dcache_mmap_lock Hugh Dickins
2004-05-04 22:40 ` Andrew Morton
2004-05-05 0:12 ` Hugh Dickins
2004-05-04 22:53 ` James Bottomley
2004-05-05 0:29 ` Hugh Dickins
2004-05-04 22:28 ` [PATCH] rmap 23 empty flush_dcache_mmap_lock Hugh Dickins
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox