linux-arch.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: akpm@linux-foundation.org
Cc: peterz@infradead.org, mingo@kernel.org, davej@redhat.com,
	sasha.levin@oracle.com, hughd@google.com, linux-mm@kvack.org,
	linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 01/38] mm: drop support of non-linear mapping from unmap/zap codepath
Date: Wed, 24 Dec 2014 14:22:09 +0200	[thread overview]
Message-ID: <1419423766-114457-2-git-send-email-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <1419423766-114457-1-git-send-email-kirill.shutemov@linux.intel.com>

We don't create non-linear mappings anymore. Let's drop code which
handles them on unmap/zap.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/mm.h |  1 -
 mm/madvise.c       |  9 +-----
 mm/memory.c        | 82 ++++++++++++++----------------------------------------
 3 files changed, 22 insertions(+), 70 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..07574d8072f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1119,7 +1119,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
 struct zap_details {
-	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
 	struct address_space *check_mapping;	/* Check page->mapping if set */
 	pgoff_t	first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
diff --git a/mm/madvise.c b/mm/madvise.c
index 6fc9b8298da1..8d74d7617598 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -419,14 +419,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
 		return -EINVAL;
 
-	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
-		struct zap_details details = {
-			.nonlinear_vma = vma,
-			.last_index = ULONG_MAX,
-		};
-		zap_page_range(vma, start, end - start, &details);
-	} else
-		zap_page_range(vma, start, end - start, NULL);
+	zap_page_range(vma, start, end - start, NULL);
 	return 0;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 33f7370cc092..5216b91a714a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1082,6 +1082,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	swp_entry_t entry;
 
 again:
 	init_rss_vec(rss);
@@ -1107,28 +1108,12 @@ again:
 				if (details->check_mapping &&
 				    details->check_mapping != page->mapping)
 					continue;
-				/*
-				 * Each page->index must be checked when
-				 * invalidating or truncating nonlinear.
-				 */
-				if (details->nonlinear_vma &&
-				    (page->index < details->first_index ||
-				     page->index > details->last_index))
-					continue;
 			}
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
-			if (unlikely(details) && details->nonlinear_vma
-			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index) {
-				pte_t ptfile = pgoff_to_pte(page->index);
-				if (pte_soft_dirty(ptent))
-					ptfile = pte_file_mksoft_dirty(ptfile);
-				set_pte_at(mm, addr, pte, ptfile);
-			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -1151,33 +1136,25 @@ again:
 			}
 			continue;
 		}
-		/*
-		 * If details->check_mapping, we leave swap entries;
-		 * if details->nonlinear_vma, we leave file entries.
-		 */
+		/* If details->check_mapping, we leave swap entries. */
 		if (unlikely(details))
 			continue;
-		if (pte_file(ptent)) {
-			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		} else {
-			swp_entry_t entry = pte_to_swp_entry(ptent);
 
-			if (!non_swap_entry(entry))
-				rss[MM_SWAPENTS]--;
-			else if (is_migration_entry(entry)) {
-				struct page *page;
+		entry = pte_to_swp_entry(ptent);
+		if (!non_swap_entry(entry))
+			rss[MM_SWAPENTS]--;
+		else if (is_migration_entry(entry)) {
+			struct page *page;
 
-				page = migration_entry_to_page(entry);
+			page = migration_entry_to_page(entry);
 
-				if (PageAnon(page))
-					rss[MM_ANONPAGES]--;
-				else
-					rss[MM_FILEPAGES]--;
-			}
-			if (unlikely(!free_swap_and_cache(entry)))
-				print_bad_pte(vma, addr, ptent, NULL);
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]--;
+			else
+				rss[MM_FILEPAGES]--;
 		}
+		if (unlikely(!free_swap_and_cache(entry)))
+			print_bad_pte(vma, addr, ptent, NULL);
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -1277,7 +1254,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
-	if (details && !details->check_mapping && !details->nonlinear_vma)
+	if (details && !details->check_mapping)
 		details = NULL;
 
 	BUG_ON(addr >= end);
@@ -1371,7 +1348,7 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
@@ -1397,7 +1374,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
@@ -2324,25 +2301,11 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 	}
 }
 
-static inline void unmap_mapping_range_list(struct list_head *head,
-					    struct zap_details *details)
-{
-	struct vm_area_struct *vma;
-
-	/*
-	 * In nonlinear VMAs there is no correspondence between virtual address
-	 * offset and file offset.  So we must perform an exhaustive search
-	 * across *all* the pages in each nonlinear VMA, not just the pages
-	 * whose virtual address lies outside the file truncation point.
-	 */
-	list_for_each_entry(vma, head, shared.nonlinear) {
-		details->nonlinear_vma = vma;
-		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
-	}
-}
-
 /**
- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified
+ * address_space corresponding to the specified page range in the underlying
+ * file.
+ *
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
@@ -2371,7 +2334,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	}
 
 	details.check_mapping = even_cows? NULL: mapping;
-	details.nonlinear_vma = NULL;
 	details.first_index = hba;
 	details.last_index = hba + hlen - 1;
 	if (details.last_index < details.first_index)
@@ -2381,8 +2343,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	i_mmap_lock_read(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
-		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	i_mmap_unlock_read(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
-- 
2.1.3

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

WARNING: multiple messages have this Message-ID (diff)
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: akpm@linux-foundation.org
Cc: peterz@infradead.org, mingo@kernel.org, davej@redhat.com,
	sasha.levin@oracle.com, hughd@google.com, linux-mm@kvack.org,
	linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 01/38] mm: drop support of non-linear mapping from unmap/zap codepath
Date: Wed, 24 Dec 2014 14:22:09 +0200	[thread overview]
Message-ID: <1419423766-114457-2-git-send-email-kirill.shutemov@linux.intel.com> (raw)
Message-ID: <20141224122209.rPwP5ZchTUlNcWFzaX2YxSuE8uP-JJhbLgbc4IGMeu4@z> (raw)
In-Reply-To: <1419423766-114457-1-git-send-email-kirill.shutemov@linux.intel.com>

We don't create non-linear mappings anymore. Let's drop code which
handles them on unmap/zap.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/mm.h |  1 -
 mm/madvise.c       |  9 +-----
 mm/memory.c        | 82 ++++++++++++++----------------------------------------
 3 files changed, 22 insertions(+), 70 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..07574d8072f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1119,7 +1119,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
 struct zap_details {
-	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
 	struct address_space *check_mapping;	/* Check page->mapping if set */
 	pgoff_t	first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
diff --git a/mm/madvise.c b/mm/madvise.c
index 6fc9b8298da1..8d74d7617598 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -419,14 +419,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
 		return -EINVAL;
 
-	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
-		struct zap_details details = {
-			.nonlinear_vma = vma,
-			.last_index = ULONG_MAX,
-		};
-		zap_page_range(vma, start, end - start, &details);
-	} else
-		zap_page_range(vma, start, end - start, NULL);
+	zap_page_range(vma, start, end - start, NULL);
 	return 0;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 33f7370cc092..5216b91a714a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1082,6 +1082,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
+	swp_entry_t entry;
 
 again:
 	init_rss_vec(rss);
@@ -1107,28 +1108,12 @@ again:
 				if (details->check_mapping &&
 				    details->check_mapping != page->mapping)
 					continue;
-				/*
-				 * Each page->index must be checked when
-				 * invalidating or truncating nonlinear.
-				 */
-				if (details->nonlinear_vma &&
-				    (page->index < details->first_index ||
-				     page->index > details->last_index))
-					continue;
 			}
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 			if (unlikely(!page))
 				continue;
-			if (unlikely(details) && details->nonlinear_vma
-			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index) {
-				pte_t ptfile = pgoff_to_pte(page->index);
-				if (pte_soft_dirty(ptent))
-					ptfile = pte_file_mksoft_dirty(ptfile);
-				set_pte_at(mm, addr, pte, ptfile);
-			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -1151,33 +1136,25 @@ again:
 			}
 			continue;
 		}
-		/*
-		 * If details->check_mapping, we leave swap entries;
-		 * if details->nonlinear_vma, we leave file entries.
-		 */
+		/* If details->check_mapping, we leave swap entries. */
 		if (unlikely(details))
 			continue;
-		if (pte_file(ptent)) {
-			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		} else {
-			swp_entry_t entry = pte_to_swp_entry(ptent);
 
-			if (!non_swap_entry(entry))
-				rss[MM_SWAPENTS]--;
-			else if (is_migration_entry(entry)) {
-				struct page *page;
+		entry = pte_to_swp_entry(ptent);
+		if (!non_swap_entry(entry))
+			rss[MM_SWAPENTS]--;
+		else if (is_migration_entry(entry)) {
+			struct page *page;
 
-				page = migration_entry_to_page(entry);
+			page = migration_entry_to_page(entry);
 
-				if (PageAnon(page))
-					rss[MM_ANONPAGES]--;
-				else
-					rss[MM_FILEPAGES]--;
-			}
-			if (unlikely(!free_swap_and_cache(entry)))
-				print_bad_pte(vma, addr, ptent, NULL);
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]--;
+			else
+				rss[MM_FILEPAGES]--;
 		}
+		if (unlikely(!free_swap_and_cache(entry)))
+			print_bad_pte(vma, addr, ptent, NULL);
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -1277,7 +1254,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
-	if (details && !details->check_mapping && !details->nonlinear_vma)
+	if (details && !details->check_mapping)
 		details = NULL;
 
 	BUG_ON(addr >= end);
@@ -1371,7 +1348,7 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
@@ -1397,7 +1374,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
@@ -2324,25 +2301,11 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 	}
 }
 
-static inline void unmap_mapping_range_list(struct list_head *head,
-					    struct zap_details *details)
-{
-	struct vm_area_struct *vma;
-
-	/*
-	 * In nonlinear VMAs there is no correspondence between virtual address
-	 * offset and file offset.  So we must perform an exhaustive search
-	 * across *all* the pages in each nonlinear VMA, not just the pages
-	 * whose virtual address lies outside the file truncation point.
-	 */
-	list_for_each_entry(vma, head, shared.nonlinear) {
-		details->nonlinear_vma = vma;
-		unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
-	}
-}
-
 /**
- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified
+ * address_space corresponding to the specified page range in the underlying
+ * file.
+ *
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
@@ -2371,7 +2334,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	}
 
 	details.check_mapping = even_cows? NULL: mapping;
-	details.nonlinear_vma = NULL;
 	details.first_index = hba;
 	details.last_index = hba + hlen - 1;
 	if (details.last_index < details.first_index)
@@ -2381,8 +2343,6 @@ void unmap_mapping_range(struct address_space *mapping,
 	i_mmap_lock_read(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
-		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	i_mmap_unlock_read(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
-- 
2.1.3


  reply	other threads:[~2014-12-24 12:22 UTC|newest]

Thread overview: 110+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-12-24 12:22 [PATCH 00/38] mm: remove non-linear mess Kirill A. Shutemov
2014-12-24 12:22 ` Kirill A. Shutemov [this message]
2014-12-24 12:22   ` [PATCH 01/38] mm: drop support of non-linear mapping from unmap/zap codepath Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 02/38] mm: drop support of non-linear mapping from fault codepath Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 03/38] mm: drop vm_ops->remap_pages and generic_file_remap_pages() stub Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 04/38] proc: drop handling non-linear mappings Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 05/38] rmap: drop support of " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 06/38] mm: replace vma->sharead.linear with vma->shared Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 07/38] mm: remove rest usage of VM_NONLINEAR and pte_file() Kirill A. Shutemov
2014-12-24 14:02   ` [PATCHv2 " Kirill A. Shutemov
2014-12-24 14:02     ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 08/38] asm-generic: drop unused pte_file* helpers Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 09/38] alpha: drop _PAGE_FILE and pte_file()-related helpers Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 10/38] arc: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:29   ` Vineet Gupta
2014-12-24 12:29     ` Vineet Gupta
2014-12-24 12:22 ` [PATCH 11/38] arm64: drop PTE_FILE " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 17:23   ` Catalin Marinas
2014-12-24 17:23     ` Catalin Marinas
2014-12-24 12:22 ` [PATCH 12/38] arm: drop L_PTE_FILE " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 13/38] avr32: drop _PAGE_FILE " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 14:15   ` Hans-Christian Egtvedt
2014-12-24 14:15     ` Hans-Christian Egtvedt
2014-12-24 12:22 ` [PATCH 14/38] blackfin: drop pte_file() Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 15/38] c6x: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 14:08   ` Mark Salter
2014-12-24 14:08     ` Mark Salter
2014-12-24 12:22 ` [PATCH 16/38] cris: drop _PAGE_FILE and pte_file()-related helpers Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 17/38] frv: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 18/38] hexagon: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 19/38] ia64: " Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 20/38] m32r: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 21/38] m68k: " Kirill A. Shutemov
2014-12-29 12:16   ` [PATCHv2 " Kirill A. Shutemov
2014-12-29 12:16     ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 22/38] metag: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2015-01-02 16:03   ` James Hogan
2015-01-02 16:03     ` James Hogan
2014-12-24 12:22 ` [PATCH 23/38] microblaze: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 24/38] mips: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-25 10:08   ` Geert Uytterhoeven
2014-12-25 10:08     ` Geert Uytterhoeven
2014-12-25 10:18     ` Kirill A. Shutemov
2014-12-25 10:18       ` Kirill A. Shutemov
2014-12-29 12:17     ` [PATCHv2 " Kirill A. Shutemov
2014-12-29 12:17       ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 25/38] mn10300: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 26/38] nios2: " Kirill A. Shutemov
2014-12-25 12:30   ` Tobias Klauser
2014-12-26  5:52     ` Ley Foon Tan
2014-12-26  5:52       ` Ley Foon Tan
2014-12-24 12:22 ` [PATCH 27/38] openrisc: " Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 28/38] parisc: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 29/38] powerpc: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2015-01-05  5:03   ` Michael Ellerman
2014-12-24 12:22 ` [PATCH 30/38] s390: drop " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-29 10:07   ` Martin Schwidefsky
2014-12-29 10:37     ` Kirill A. Shutemov
2014-12-29 10:37       ` Kirill A. Shutemov
2014-12-30 10:55       ` Martin Schwidefsky
2014-12-30 10:55         ` Martin Schwidefsky
2014-12-24 12:22 ` [PATCH 31/38] score: drop _PAGE_FILE and " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 32/38] sh: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 14:05   ` [PATCH] " Kirill A. Shutemov
2014-12-24 14:05     ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 33/38] sparc: drop " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-25 23:34   ` David Miller
2014-12-24 12:22 ` [PATCH 34/38] tile: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-25  2:57   ` Chris Metcalf
2014-12-25  2:57     ` Chris Metcalf
2014-12-24 12:22 ` [PATCH 35/38] um: drop _PAGE_FILE and " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 36/38] unicore32: drop " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 37/38] x86: drop _PAGE_FILE and " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-24 12:22 ` [PATCH 38/38] xtensa: " Kirill A. Shutemov
2014-12-24 12:22   ` Kirill A. Shutemov
2014-12-25 10:25   ` Max Filippov
2014-12-25 10:25     ` Max Filippov
2014-12-26 21:10 ` [PATCH 00/38] mm: remove non-linear mess Felipe Balbi
2014-12-26 21:10   ` Felipe Balbi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1419423766-114457-2-git-send-email-kirill.shutemov@linux.intel.com \
    --to=kirill.shutemov@linux.intel.com \
    --cc=akpm@linux-foundation.org \
    --cc=davej@redhat.com \
    --cc=hughd@google.com \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mingo@kernel.org \
    --cc=peterz@infradead.org \
    --cc=sasha.levin@oracle.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).