linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: "Radosław Smogura" <mail@smogura.eu>
To: linux-mm@kvack.org
Cc: Yongqiang Yang <xiaoqiangnk@gmail.com>,
	mail@smogura.eu, linux-ext4@vger.kernel.org
Subject: [PATCH 09/18] Allowing usage of MAP_HUGETLB in mmap
Date: Thu, 16 Feb 2012 15:31:36 +0100	[thread overview]
Message-ID: <1329402705-25454-9-git-send-email-mail@smogura.eu> (raw)
In-Reply-To: <1329402705-25454-1-git-send-email-mail@smogura.eu>

Patch adds support for mapping a file with MAP_HUGETLB and
checks whether the filesystem supports huge page cache.

Signed-off-by: Radosław Smogura <mail@smogura.eu>
---
 mm/mmap.c  |   24 +++++++++++++++-
 mm/shmem.c |   84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+), 2 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index 3f758c7..19f3016 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -992,6 +992,12 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
+	if (flags & MAP_HUGETLB) {
+		vm_flags &= ~VM_NOHUGEPAGE;
+		vm_flags |= VM_HUGEPAGE;
+		printk(KERN_INFO "Setted huge page mapping in do_mmap_pgoff.");
+	}
+
 	if (flags & MAP_LOCKED)
 		if (!can_do_mlock())
 			return -EPERM;
@@ -1086,11 +1092,25 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
 	if (!(flags & MAP_ANONYMOUS)) {
 		audit_mmap_fd(fd, flags);
-		if (unlikely(flags & MAP_HUGETLB))
-			return -EINVAL;
 		file = fget(fd);
 		if (!file)
 			goto out;
+
+		if (unlikely(flags & MAP_HUGETLB)) {
+#ifdef CONFIG_HUGEPAGECACHE
+			if (!(file->f_mapping->a_ops->defragpage)) {
+				fput(file);
+				retval = -EINVAL;
+				goto out;
+			} else {
+				printk(KERN_INFO "Called to mmap huge with"
+					" good fs type.\n");
+			}
+#else
+			fput(file);
+			return -EINVAL;
+#endif
+		}
 	} else if (flags & MAP_HUGETLB) {
 		struct user_struct *user = NULL;
 		/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 269d049..a834488 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1065,6 +1065,90 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return ret;
 }
 
+static int shmem_fault_huge(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+	int error;
+	int ret = VM_FAULT_LOCKED;
+
+	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+	if (error)
+		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+
+	/* Just portion of developer code, to force defragmentation, as we have
+	 * no external interface to make defragmentation (or daemon to do it).
+	 */
+	if ((vma->vm_flags & VM_HUGEPAGE) && !PageCompound(vmf->page)) {
+		/* Force defrag - mainly devo code */
+		int defragResult;
+		const loff_t hugeChunkSize = 1 << (PMD_SHIFT - PAGE_SHIFT);
+
+		const loff_t vmaSizeToMap = (vma->vm_start
+				+ ((vmf->pgoff + vma->vm_pgoff + hugeChunkSize)
+				<< PAGE_SHIFT) <= vma->vm_end) ?
+					hugeChunkSize : 0;
+
+		const loff_t inodeSizeToMap =
+				(vmf->pgoff + vma->vm_pgoff + hugeChunkSize <
+				inode->i_size) ? hugeChunkSize : 0;
+
+		const struct defrag_pagecache_ctl defragControl = {
+			.fillPages = 1,
+			.requireFillPages = 1,
+			.force = 1
+		};
+
+		if (ret & VM_FAULT_LOCKED) {
+			unlock_page(vmf->page);
+		}
+		put_page(vmf->page);
+
+		defragResult = defragPageCache(vma->vm_file,
+			vmf->pgoff,
+			min(vmaSizeToMap, min(inodeSizeToMap, hugeChunkSize)),
+			&defragControl);
+		printk(KERN_INFO "Page defragmented with result %d\n",
+			defragResult);
+		
+		/* Retake page. */
+		error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
+			&ret);
+		if (error) {
+			return ((error == -ENOMEM) ?
+				VM_FAULT_OOM : VM_FAULT_SIGBUS);
+		}
+	}
+
+	/* XXX Page & compound lock ordering please... */
+	
+	/* After standard fault page is getted. */
+	if (PageCompound(vmf->page)) {
+		compound_lock(vmf->page);
+		if (!PageHead(vmf->page)) {
+			compound_unlock(vmf->page);
+			goto no_hugepage;
+		}
+	}else {
+		goto no_hugepage;
+	}
+	
+	if (!(ret & VM_FAULT_LOCKED))
+		lock_page(vmf->page);
+	
+	ret |= VM_FAULT_LOCKED;
+	
+	if (ret & VM_FAULT_MAJOR) {
+		count_vm_event(PGMAJFAULT);
+		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+	}
+	return ret;
+no_hugepage:
+	if (ret & VM_FAULT_LOCKED)
+		unlock_page(vmf->page);
+	page_cache_release(vmf->page);
+	vmf->page = NULL;
+	return VM_FAULT_NOHUGE;
+}
 #ifdef CONFIG_NUMA
 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 {
-- 
1.7.3.4

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

  parent reply	other threads:[~2012-02-16 14:32 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2012-02-16 14:31 [PATCH 01/18] Added hacking menu for override optimization by GCC Radosław Smogura
2012-02-16 14:31 ` [PATCH 02/18] Change of refcounting method for compound pages and atomic heads Radosław Smogura
2012-02-16 14:31 ` [PATCH 03/18] Compound read / write locking aka get / put Radosław Smogura
2012-02-16 14:31 ` [PATCH 04/18] Page flag for tracking compound deque Radosław Smogura
2012-02-16 14:31 ` [PATCH 05/18] Various VM_BUG_ON for securing tail pages usage Radosław Smogura
2012-02-16 14:31 ` [PATCH 06/18] Make migrate pages fucntion more flexible Radosław Smogura
2012-02-16 14:31 ` [PATCH 07/18] Configuration menu for Huge Page Cache Radosław Smogura
2012-02-16 14:31 ` [PATCH 08/18] Generic routines for defragmenting pagecache Radosław Smogura
2012-02-16 14:31 ` Radosław Smogura [this message]
2012-02-16 14:31 ` [PATCH 10/18] Support for huge page faulting Radosław Smogura
2012-02-16 14:31 ` [PATCH 11/18] Basic support (faulting) for huge pages for shmfs Radosław Smogura
2012-02-16 14:31 ` [PATCH 12/18] Additional macros for pmd operations Radosław Smogura
2012-02-16 14:31 ` [PATCH 13/18] Zapping and freeing huge mappings Radosław Smogura
2012-02-16 14:31 ` [PATCH 14/18] Fixes for proc memory Radosław Smogura
2012-02-16 14:31 ` [PATCH 15/18] Splitting and truncating Radosław Smogura
2012-02-16 14:31 ` [PATCH 16/18] SHM: Support for splitting on truncation Radosław Smogura
2012-02-16 14:31 ` [PATCH 17/18] [Experimental] Support for huge pages in EXT 4 Radosław Smogura
2012-02-16 14:31 ` [PATCH 18/18] [WIP] Dummy patch for details Radosław Smogura
2012-02-16 15:44 ` [PATCH 01/18] Added hacking menu for override optimization by GCC Randy Dunlap
2012-02-16 16:11   ` Radosław Smogura
2012-02-16 19:09 ` Michal Nazarewicz
2012-02-16 20:26   ` Radosław Smogura
2012-02-16 21:59     ` Michal Nazarewicz
2012-02-16 22:40       ` Radosław Smogura
2012-02-16 23:11         ` Michal Nazarewicz
2012-02-17 14:33           ` [PATCH] " Radosław Smogura

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1329402705-25454-9-git-send-email-mail@smogura.eu \
    --to=mail@smogura.eu \
    --cc=linux-ext4@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=xiaoqiangnk@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).