* [PATCH] Allow __vmalloc with GFP_ATOMIC
@ 2007-04-28 4:03 Giridhar Pemmasani
2007-04-28 4:13 ` Nick Piggin
2007-04-28 7:52 ` Russell King
0 siblings, 2 replies; 4+ messages in thread
From: Giridhar Pemmasani @ 2007-04-28 4:03 UTC (permalink / raw)
To: Linux Kernel Mailing List
Until 2.6.19, __vmalloc with GFP_ATOMIC was possible, but __get_vm_area_node
would allocate the node itself with GFP_KERNEL, causing a warning. In 2.6.19,
this was "fixed" by using the same flags that were passed to __vmalloc also
in __get_vm_area_node. However, __get_vm_area_node does
BUG_ON(in_interrupt()) now, since vmlist_lock is obtained without disabling
bottom halves. The patch below uses a bh-disabled lock for vmlist_lock, so that
__vmalloc can be used in interrupt context.
In 2.6.21, __vmalloc with GFP_ATOMIC is used by arch/um/kernel/process.c;
__vmalloc is also used in ntfs, xfs, but it is not clear to me if they use it
with GFP_ATOMIC or GFP_KERNEL.
Thanks,
Giri
Signed-off-by: Giridhar Pemmasani <pgiri@yahoo.com>
---
--- linux-2.6.21.orig/./arch/arm/mm/ioremap.c 2007-04-25 23:08:32.000000000
-0400
+++ linux-2.6.21.new/./arch/arm/mm/ioremap.c 2007-04-27 23:29:27.000000000
-0400
@@ -363,7 +363,7 @@
* all the mappings before the area can be reclaimed
* by someone else.
*/
- write_lock(&vmlist_lock);
+ write_lock_bh(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
if (tmp->flags & VM_ARM_SECTION_MAPPING) {
@@ -376,7 +376,7 @@
break;
}
}
- write_unlock(&vmlist_lock);
+ write_unlock_bh(&vmlist_lock);
#endif
if (!section_mapping)
--- linux-2.6.21.orig/./arch/i386/mm/ioremap.c 2007-04-25 23:08:32.000000000
-0400
+++ linux-2.6.21.new/./arch/i386/mm/ioremap.c 2007-04-27 23:29:27.000000000
-0400
@@ -180,12 +180,12 @@
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
for (p = vmlist; p; p = p->next) {
if (p->addr == addr)
break;
}
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
if (!p) {
printk("iounmap: bad address %p\n", addr);
--- linux-2.6.21.orig/./arch/x86_64/mm/ioremap.c 2007-04-25
23:08:32.000000000 -0400
+++ linux-2.6.21.new/./arch/x86_64/mm/ioremap.c 2007-04-27 23:29:27.000000000
-0400
@@ -175,12 +175,12 @@
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
for (p = vmlist; p; p = p->next) {
if (p->addr == addr)
break;
}
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
if (!p) {
printk("iounmap: bad address %p\n", addr);
--- linux-2.6.21.orig/./fs/proc/kcore.c 2007-04-25 23:08:32.000000000 -0400
+++ linux-2.6.21.new/./fs/proc/kcore.c 2007-04-27 23:29:27.000000000 -0400
@@ -335,7 +335,7 @@
if (!elf_buf)
return -ENOMEM;
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
for (m=vmlist; m && cursize; m=m->next) {
unsigned long vmstart;
unsigned long vmsize;
@@ -363,7 +363,7 @@
memcpy(elf_buf + (vmstart - start),
(char *)vmstart, vmsize);
}
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
if (copy_to_user(buffer, elf_buf, tsz)) {
kfree(elf_buf);
return -EFAULT;
--- linux-2.6.21.orig/./fs/proc/mmu.c 2007-04-25 23:08:32.000000000 -0400
+++ linux-2.6.21.new/./fs/proc/mmu.c 2007-04-27 23:29:41.000000000 -0400
@@ -47,7 +47,7 @@
prev_end = VMALLOC_START;
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
for (vma = vmlist; vma; vma = vma->next) {
unsigned long addr = (unsigned long) vma->addr;
@@ -72,6 +72,6 @@
if (VMALLOC_END - prev_end > vmi->largest_chunk)
vmi->largest_chunk = VMALLOC_END - prev_end;
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
}
}
--- linux-2.6.21.orig/./mm/vmalloc.c 2007-04-25 23:08:32.000000000 -0400
+++ linux-2.6.21.new/./mm/vmalloc.c 2007-04-27 23:33:17.000000000 -0400
@@ -168,7 +168,7 @@
unsigned long align = 1;
unsigned long addr;
- BUG_ON(in_interrupt());
+ BUG_ON(in_irq());
if (flags & VM_IOREMAP) {
int bit = fls(size);
@@ -193,7 +193,7 @@
*/
size += PAGE_SIZE;
- write_lock(&vmlist_lock);
+ write_lock_bh(&vmlist_lock);
for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
if ((unsigned long)tmp->addr < addr) {
if((unsigned long)tmp->addr + tmp->size >= addr)
@@ -220,12 +220,12 @@
area->pages = NULL;
area->nr_pages = 0;
area->phys_addr = 0;
- write_unlock(&vmlist_lock);
+ write_unlock_bh(&vmlist_lock);
return area;
out:
- write_unlock(&vmlist_lock);
+ write_unlock_bh(&vmlist_lock);
kfree(area);
if (printk_ratelimit())
printk(KERN_WARNING "allocation failed: out of vmalloc space - use
vmalloc=<size> to increase size.\n");
@@ -305,9 +305,9 @@
struct vm_struct *remove_vm_area(void *addr)
{
struct vm_struct *v;
- write_lock(&vmlist_lock);
+ write_lock_bh(&vmlist_lock);
v = __remove_vm_area(addr);
- write_unlock(&vmlist_lock);
+ write_unlock_bh(&vmlist_lock);
return v;
}
@@ -364,7 +364,7 @@
*/
void vfree(void *addr)
{
- BUG_ON(in_interrupt());
+ BUG_ON(in_irq());
__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
@@ -380,7 +380,7 @@
*/
void vunmap(void *addr)
{
- BUG_ON(in_interrupt());
+ BUG_ON(in_irq());
__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
@@ -530,10 +530,10 @@
ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL);
if (ret) {
- write_lock(&vmlist_lock);
+ write_lock_bh(&vmlist_lock);
area = __find_vm_area(ret);
area->flags |= VM_USERMAP;
- write_unlock(&vmlist_lock);
+ write_unlock_bh(&vmlist_lock);
}
return ret;
}
@@ -604,10 +604,10 @@
ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
if (ret) {
- write_lock(&vmlist_lock);
+ write_lock_bh(&vmlist_lock);
area = __find_vm_area(ret);
area->flags |= VM_USERMAP;
- write_unlock(&vmlist_lock);
+ write_unlock_bh(&vmlist_lock);
}
return ret;
}
@@ -623,7 +623,7 @@
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
for (tmp = vmlist; tmp; tmp = tmp->next) {
vaddr = (char *) tmp->addr;
if (addr >= vaddr + tmp->size - PAGE_SIZE)
@@ -647,7 +647,7 @@
} while (--n > 0);
}
finished:
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
return buf - buf_start;
}
@@ -661,7 +661,7 @@
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
for (tmp = vmlist; tmp; tmp = tmp->next) {
vaddr = (char *) tmp->addr;
if (addr >= vaddr + tmp->size - PAGE_SIZE)
@@ -684,7 +684,7 @@
} while (--n > 0);
}
finished:
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
return buf - buf_start;
}
@@ -712,7 +712,7 @@
if ((PAGE_SIZE-1) & (unsigned long)addr)
return -EINVAL;
- read_lock(&vmlist_lock);
+ read_lock_bh(&vmlist_lock);
area = __find_vm_area(addr);
if (!area)
goto out_einval_locked;
@@ -722,7 +722,7 @@
if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
goto out_einval_locked;
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
addr += pgoff << PAGE_SHIFT;
do {
@@ -742,7 +742,7 @@
return ret;
out_einval_locked:
- read_unlock(&vmlist_lock);
+ read_unlock_bh(&vmlist_lock);
return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
__________________________________________________
Do You Yahoo!?
Tired of spam? Yahoo! Mail has the best spam protection around
http://mail.yahoo.com
^ permalink raw reply [flat|nested] 4+ messages in thread* Re: [PATCH] Allow __vmalloc with GFP_ATOMIC
2007-04-28 4:03 [PATCH] Allow __vmalloc with GFP_ATOMIC Giridhar Pemmasani
@ 2007-04-28 4:13 ` Nick Piggin
2007-04-28 5:48 ` Giridhar Pemmasani
2007-04-28 7:52 ` Russell King
1 sibling, 1 reply; 4+ messages in thread
From: Nick Piggin @ 2007-04-28 4:13 UTC (permalink / raw)
To: Giridhar Pemmasani; +Cc: Linux Kernel Mailing List
Giridhar Pemmasani wrote:
> Until 2.6.19, __vmalloc with GFP_ATOMIC was possible, but __get_vm_area_node
> would allocate the node itself with GFP_KERNEL, causing a warning. In 2.6.19,
> this was "fixed" by using the same flags that were passed to __vmalloc also
> in __get_vm_area_node. However, __get_vm_area_node does
> BUG_ON(in_interrupt()) now, since vmlist_lock is obtained without disabling
> bottom-half's. The patch below uses bh disabled lock for vmlist_lock, so that
> __vmalloc can be used in interrupt context.
>
> In 2.6.21, __vmalloc with GFP_ATOMIC is used by arch/um/kernel/process.c;
> __vmalloc is also used in ntfs, xfs, but it is not clear to me if they use it
> with GFP_ATOMIC or GFP_KERNEL.
>
> Thanks,
> Giri
Hi Giri,
I'm sure I've read the reason for this one before, but when you do patches
like these, can you include that reason in the changelog please?
Thanks,
Nick
--
SUSE Labs, Novell Inc.
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] Allow __vmalloc with GFP_ATOMIC
2007-04-28 4:03 [PATCH] Allow __vmalloc with GFP_ATOMIC Giridhar Pemmasani
2007-04-28 4:13 ` Nick Piggin
@ 2007-04-28 7:52 ` Russell King
1 sibling, 0 replies; 4+ messages in thread
From: Russell King @ 2007-04-28 7:52 UTC (permalink / raw)
To: Giridhar Pemmasani; +Cc: Linux Kernel Mailing List
On Fri, Apr 27, 2007 at 09:03:33PM -0700, Giridhar Pemmasani wrote:
> Until 2.6.19, __vmalloc with GFP_ATOMIC was possible, but __get_vm_area_node
> would allocate the node itself with GFP_KERNEL, causing a warning. In 2.6.19,
> this was "fixed" by using the same flags that were passed to __vmalloc also
> in __get_vm_area_node. However, __get_vm_area_node does
> BUG_ON(in_interrupt()) now, since vmlist_lock is obtained without disabling
> bottom-half's. The patch below uses bh disabled lock for vmlist_lock, so that
> __vmalloc can be used in interrupt context.
It's worse than that. If vmalloc has to allocate a page table, the
allocation will be done as a non-atomic allocation. So, even if you
do fix __get_vm_area_node and all the other cases, if you hit a page
table allocation you'll still get a warning.
Folks need to accept that using vmalloc from atomic contexts is a
complete no-no.
--
Russell King
Linux kernel 2.6 ARM Linux - http://www.arm.linux.org.uk/
maintainer of:
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2007-04-28 7:52 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-04-28 4:03 [PATCH] Allow __vmalloc with GFP_ATOMIC Giridhar Pemmasani
2007-04-28 4:13 ` Nick Piggin
2007-04-28 5:48 ` Giridhar Pemmasani
2007-04-28 7:52 ` Russell King
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox