* [Linux-ia64] HugeTLB Page patch for IA-64 2.5.60 kernel
@ 2003-02-22 4:37 Seth, Rohit
0 siblings, 0 replies; only message in thread
From: Seth, Rohit @ 2003-02-22 4:37 UTC (permalink / raw)
To: linux-ia64
[-- Attachment #1.1: Type: text/plain, Size: 261 bytes --]
Please find attached a hugetlb page patch for the IA-64 2.5.60 kernel.
Changes in generic files are mostly backported from 2.5.62 (to get
hugetlb support functioning properly).
Let me know if anyone sees any issues.
thanks,
rohit
<<patch.2560>>
[-- Attachment #1.2: Type: text/html, Size: 856 bytes --]
[-- Attachment #2: patch.2560 --]
[-- Type: application/octet-stream, Size: 5841 bytes --]
--- include/linux/hugetlb.h.org Wed Feb 19 12:11:16 2003
+++ include/linux/hugetlb.h Fri Feb 21 10:46:55 2003
@@ -26,6 +26,7 @@
unsigned long address);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write);
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
extern int htlbpage_max;
@@ -56,6 +57,7 @@
#define hugepage_vma(mm, addr) 0
#define mark_mm_hugetlb(mm, vma) do { } while (0)
#define follow_huge_pmd(mm, addr, pmd, write) 0
+#define is_aligned_hugepage_range(addr, len) 0
#define pmd_huge(x) 0
#ifndef HPAGE_MASK
--- include/asm-ia64/page.h.org Fri Feb 21 10:41:55 2003
+++ include/asm-ia64/page.h Fri Feb 21 11:35:00 2003
@@ -128,6 +128,9 @@
# define htlbpage_to_page(x) ((REGION_NUMBER(x) << 61) \
| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+extern int is_invalid_hugepage_range(unsigned long addr, unsigned long len);
+#else
+#define is_invalid_hugepage_range(addr, len) 0
#endif
static __inline__ int
--- mm/mmap.c.org Wed Feb 19 11:45:51 2003
+++ mm/mmap.c Wed Feb 19 12:56:07 2003
@@ -797,10 +797,26 @@
unsigned long pgoff, unsigned long flags)
{
if (flags & MAP_FIXED) {
+ unsigned long ret;
+
if (addr > TASK_SIZE - len)
return -ENOMEM;
if (addr & ~PAGE_MASK)
return -EINVAL;
+ if (file && is_file_hugepages(file))
+ /* If the request is for hugepages, then make sure that addr
+ * and len are properly aligned.
+ */
+ ret = is_aligned_hugepage_range(addr, len);
+ else
+ /*
+ * Make sure that a normal request is not falling
+ * in reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_invalid_hugepage_range(addr, len);
+ if (ret)
+ return ret;
return addr;
}
--- fs/hugetlbfs/inode.c.org Wed Feb 19 11:01:43 2003
+++ fs/hugetlbfs/inode.c Wed Feb 19 11:03:27 2003
@@ -45,6 +45,7 @@
{
struct inode *inode =file->f_dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
+ loff_t len;
int ret;
if (!capable(CAP_IPC_LOCK))
@@ -65,6 +66,10 @@
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
ret = hugetlb_prefault(mapping, vma);
+ len = (loff_t)(vma->vm_end - vma->vm_start) +
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ if (ret == 0 && inode->i_size < len)
+ inode->i_size = len;
up(&inode->i_sem);
return ret;
}
@@ -211,7 +216,7 @@
list_add(&inode->i_list, &inode_unused);
}
inodes_stat.nr_unused++;
- if (!super_block | (super_block->s_flags & MS_ACTIVE)) {
+ if (!super_block || (super_block->s_flags & MS_ACTIVE)) {
spin_unlock(&inode_lock);
return;
}
--- arch/ia64/mm/hugetlbpage.c.org Thu Feb 13 11:24:44 2003
+++ arch/ia64/mm/hugetlbpage.c Fri Feb 21 18:17:58 2003
@@ -95,6 +95,31 @@
set_pte(page_table, entry);
return;
}
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ if (REGION_NUMBER(addr) != REGION_HPAGE)
+ return -EINVAL;
+
+ return 0;
+}
+/* This function checks whether addr and addr+len fall outside the HugeTLB region. It
+ * returns -EINVAL if any part of the address range falls in the HugeTLB region.
+ */
+int is_invalid_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (REGION_NUMBER(addr) == REGION_HPAGE)
+ return -EINVAL;
+ if (REGION_NUMBER(addr+len) == REGION_HPAGE)
+ return -EINVAL;
+ return 0;
+}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
@@ -158,6 +183,39 @@
return i;
}
+struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
+{
+ if (mm->used_hugetlb) {
+ if (REGION_NUMBER(addr) == REGION_HPAGE) {
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ if (vma && is_vm_hugetlb_page(vma))
+ return vma;
+ }
+ }
+ return NULL;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int write)
+{
+ struct page *page;
+ pte_t *ptep;
+
+ ptep = huge_pte_offset(mm, addr);
+ page = pte_page(*ptep);
+ page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
+ get_page(page);
+ return page;
+}
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+{
+ return NULL;
+}
+
void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
@@ -189,8 +247,6 @@
BUG_ON(start & (HPAGE_SIZE - 1));
BUG_ON(end & (HPAGE_SIZE - 1));
- spin_lock(&htlbpage_lock);
- spin_unlock(&htlbpage_lock);
for (address = start; address < end; address += HPAGE_SIZE) {
pte = huge_pte_offset(mm, address);
if (pte_none(*pte))
@@ -242,8 +298,12 @@
ret = -ENOMEM;
goto out;
}
- add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
+ ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
unlock_page(page);
+ if (ret) {
+ free_huge_page(page);
+ goto out;
+ }
}
set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
}
@@ -287,8 +347,8 @@
break;
}
page = list_entry(p, struct page, list);
- if ((page_zone(page))->name[0] != 'H') // Look for non-Highmem
- map = page;
+ if (!PageHighMem(page))
+ map = page;
}
if (map) {
list_del(&map->list);
@@ -302,8 +362,8 @@
int set_hugetlb_mem_size(int count)
{
- int j, lcount;
- struct page *page, *map;
+ int lcount;
+ struct page *page ;
extern long htlbzone_pages;
extern struct list_head htlbpage_freelist;
@@ -402,5 +462,4 @@
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
- .close = zap_hugetlb_resources,
};
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2003-02-22 4:37 UTC | newest]
Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2003-02-22 4:37 [Linux-ia64] HugeTLB Page patch for IA-64 2.5.60 kernel Seth, Rohit
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox