--- linux-2.6.9_base/include/asm-ia64/mmu_context.h	2005-09-02 14:56:31.000000000 -0400
+++ linux-2.6.9/include/asm-ia64/mmu_context.h	2005-10-05 10:10:36.000000000 -0400
@@ -33,11 +33,13 @@
 	unsigned int next;	/* next context number to use */
 	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
 	unsigned int max_ctx;	/* max. context value supported by all CPUs */
+	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
 };
 
 extern struct ia64_ctx ia64_ctx;
 DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 
+extern void mmu_context_init (void);
 extern void wrap_mmu_context (struct mm_struct *mm);
 
 static inline void
@@ -80,6 +82,7 @@
 			if (ia64_ctx.next >= ia64_ctx.limit)
 				wrap_mmu_context(mm);
 			mm->context = context = ia64_ctx.next++;
+			set_bit(context, ia64_ctx.bitmap);
 		}
 	}
 	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
--- linux-2.6.9_base/include/asm-ia64/tlbflush.h	2005-09-02 14:56:31.000000000 -0400
+++ linux-2.6.9/include/asm-ia64/tlbflush.h	2005-09-30 11:11:11.000000000 -0400
@@ -51,6 +51,7 @@
 	if (!mm)
 		return;
 
+	clear_bit(mm->context, ia64_ctx.bitmap);
 	mm->context = 0;
 
 	if (atomic_read(&mm->mm_users) == 0)
--- linux-2.6.9_base/arch/ia64/mm/tlb.c	2005-09-02 14:56:53.000000000 -0400
+++ linux-2.6.9/arch/ia64/mm/tlb.c	2005-10-05 10:10:48.000000000 -0400
@@ -16,12 +16,14 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/bootmem.h>
 #include <linux/mm.h>
 
 #include <asm/delay.h>
 #include <asm/mmu_context.h>
 #include <asm/pal.h>
 #include <asm/tlbflush.h>
+#include <asm/dma.h>
 
 static struct {
 	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -32,19 +34,39 @@
 	.lock =		SPIN_LOCK_UNLOCKED,
 	.next =		1,
 	.limit =	(1 << 15) - 1,		/* start out with the safe (architected) limit */
-	.max_ctx =	~0U
+	.max_ctx =	~0U,
+	.bitmap =	NULL
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 
 /*
+ * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
+ * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
+ * maximum RID that is supported by all CPUs.
+ */
+void __init
+mmu_context_init (void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ia64_ctx.lock, flags);
+	if (ia64_ctx.bitmap == NULL) {
+		ia64_ctx.bitmap = (unsigned long *)__alloc_bootmem(
+					ia64_ctx.max_ctx+1,
+					PAGE_SIZE,
+					__pa(MAX_DMA_ADDRESS));
+	}
+	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+}
+
+/*
  * Acquire the ia64_ctx.lock before calling this function!
  */
 void
 wrap_mmu_context (struct mm_struct *mm)
 {
-	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
-	struct task_struct *tsk;
+	unsigned int next_ctx, max_ctx = ia64_ctx.max_ctx;
 	int i;
 
 	if (ia64_ctx.next > max_ctx)
@@ -52,28 +74,23 @@
 	ia64_ctx.limit = max_ctx + 1;
 
 	/*
-	 * Scan all the task's mm->context and set proper safe range
+	 * Scan the ia64_ctx bitmap and set proper safe range
 	 */
+repeat:
+	next_ctx = find_next_zero_bit(ia64_ctx.bitmap, ia64_ctx.limit, ia64_ctx.next);
+	if (next_ctx >= ia64_ctx.limit) {
+		smp_mb();
+		ia64_ctx.next = 300;	/* skip daemons */
+		goto repeat;
+	}
+	ia64_ctx.next = next_ctx;
 
-	read_lock(&tasklist_lock);
-  repeat:
-	for_each_process(tsk) {
-		if (!tsk->mm)
-			continue;
-		tsk_context = tsk->mm->context;
-		if (tsk_context == ia64_ctx.next) {
-			if (++ia64_ctx.next >= ia64_ctx.limit) {
-				/* empty range: reset the range limit and start over */
-				if (ia64_ctx.next > max_ctx)
-					ia64_ctx.next = 300;
-				ia64_ctx.limit = max_ctx + 1;
-				goto repeat;
-			}
-		}
-		if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
-			ia64_ctx.limit = tsk_context;
+	next_ctx = find_next_bit(ia64_ctx.bitmap, ia64_ctx.limit, ia64_ctx.next);
+	if (next_ctx >= ia64_ctx.limit) {
+		next_ctx = ia64_ctx.limit;
 	}
-	read_unlock(&tasklist_lock);
+	ia64_ctx.limit = next_ctx;
+
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu();	/* prevent preemption/migration */
--- linux-2.6.9_base/arch/ia64/kernel/setup.c	2005-09-02 14:56:49.000000000 -0400
+++ linux-2.6.9/arch/ia64/kernel/setup.c	2005-09-30 11:07:46.000000000 -0400
@@ -345,6 +345,7 @@
 #endif
 
 	cpu_init();	/* initialize the bootstrap CPU */
+	mmu_context_init();	/* initialize context_id bitmap */
 
 #ifdef CONFIG_ACPI_BOOT
 	acpi_boot_init();