diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kdb/kdba_bt.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kdb/kdba_bt.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kdb/kdba_bt.c	Thu Jan 31 16:42:51 2002
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kdb/kdba_bt.c	Thu Jan 31 18:40:40 2002
@@ -197,7 +197,7 @@
 	}
 #ifdef CONFIG_SMP
 	else if (task_has_cpu(p)) {
-		sw = kdb_sw[p->processor];
+		sw = kdb_sw[p->cpu];
 	}
 #endif
 	else {
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/entry.S 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/entry.S
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/entry.S	Thu Jan 31 16:42:26 2002
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/entry.S	Thu Jan 31 18:40:40 2002
@@ -161,7 +161,8 @@
 	mov r8=r13		// return pointer to previously running task
 	mov r13=in0		// set "current" pointer
 	;;
-(p6)	ssm psr.i		// renable psr.i AFTER the ic bit is serialized
+//(p6)	ssm psr.i		// interrupt delivery should not be enabled
+				// with the new O(1) MQ scheduler
 	DO_LOAD_SWITCH_STACK
 
 #ifdef CONFIG_SMP
@@ -170,7 +171,8 @@
 	br.ret.sptk.many rp	// boogie on out in new context
 
 .map:
-	rsm psr.i | psr.ic
+	//rsm psr.i | psr.ic
+	rsm psr.ic
 	movl r25=PAGE_KERNEL
 	;;
 	srlz.d
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/irq_ia64.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/irq_ia64.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/irq_ia64.c	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/irq_ia64.c	Thu Jan 31 18:40:40 2002
@@ -148,6 +148,14 @@
 	flags:		SA_INTERRUPT,
 	name:		"IPI"
 };
+
+extern void smp_task_migration_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+static struct irqaction task_migration_irqaction = {
+	handler:	smp_task_migration_interrupt,
+	flags:		SA_INTERRUPT,
+	name:		"Task migration"
+};
 #endif
 
 void
@@ -172,6 +180,7 @@
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+	register_percpu_irq(IA64_TASK_MIGRATION, &task_migration_irqaction);
 #endif
 	platform_irq_init();
 }
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/process.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/process.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/process.c	Thu Jan 31 16:42:26 2002
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/process.c	Thu Jan 31 18:40:40 2002
@@ -125,9 +125,6 @@
 cpu_idle (void *unused)
 {
 	/* endless idle loop with no priority at all */
-	init_idle();
-	current->nice = 20;
-	current->counter = -100;
 
 	while (1) {
 
@@ -136,11 +133,10 @@
 		min_xtp();
 #endif
 
-		while (!current->need_resched) {
+		if (!current->need_resched) {
#ifdef CONFIG_IA64_SGI_SN
 			snidle();
 #endif
-			continue;
 		}
 
 #ifdef CONFIG_IA64_SGI_SN
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/setup.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/setup.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/setup.c	Thu Jan 31 16:42:26 2002
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/setup.c	Thu Jan 31 18:40:40 2002
@@ -375,10 +375,10 @@
 {
 #ifdef CONFIG_SMP
 #	define lpj	c->loops_per_jiffy
-#	define cpu	c->processor
+#	define cpum	c->processor
 #else
 #	define lpj	loops_per_jiffy
-#	define cpu	0
+#	define cpum	0
 #endif
 	char family[32], features[128], *cp;
 	struct cpuinfo_ia64 *c = v;
@@ -417,7 +417,7 @@
 	       "cpu MHz    : %lu.%06lu\n"
 	       "itc MHz    : %lu.%06lu\n"
 	       "BogoMIPS   : %lu.%02lu\n\n",
-	       cpu, c->vendor, family, c->model, c->revision, c->archrev,
+	       cpum, c->vendor, family, c->model, c->revision, c->archrev,
 	       features, c->ppn, c->number,
 	       c->proc_freq / 1000000, c->proc_freq % 1000000,
 	       c->itc_freq / 1000000, c->itc_freq % 1000000,
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/smp.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/smp.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/smp.c	Thu Jan 31 16:42:51 2002
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/smp.c	Fri Feb 1 14:36:45 2002
@@ -17,6 +17,8 @@
  *		scheme.
  * 10/13/00 Goutham Rao	Updated smp_call_function and
  *		smp_call_function_single to resend IPI on timeouts
+ * 01/31/02 Erich Focht	smp_call_function_nowait for avoiding
+ *		race conditions with O(1) scheduler.
  */
 
 #define __KERNEL_SYSCALLS__
@@ -71,10 +73,16 @@
 };
 
 static volatile struct call_data_struct *call_data;
 
+static struct call_data_struct nowait_data = { NULL,
+					       NULL,
+					       0L,
+					       ATOMIC_INIT(0),
+					       ATOMIC_INIT(0) };
+
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
 #define IPI_KDB_INTERRUPT	2
+#define IPI_CALL_FUNC_NOWAIT	3
 
 static void
 stop_this_cpu (void)
@@ -146,6 +154,26 @@
 		break;
 #endif
 
+	      case IPI_CALL_FUNC_NOWAIT:
+	      {
+		struct call_data_struct *data;
+		void (*func)(void *info);
+		void *info;
+
+		/* release the 'pointer lock' */
+		data = (struct call_data_struct *) &nowait_data;
+		func = data->func;
+		info = data->info;
+
+		mb();
+		atomic_dec(&data->started);
+
+		/* At this point the structure may be gone unless wait is true. */
+		(*func)(info);
+
+	      }
+	      break;
+
 	      default:
 		printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
 		break;
 	}
@@ -196,12 +224,55 @@
 }
 
 void
+smp_send_reschedule_all(void)
+{
+	send_IPI_all(IA64_IPI_RESCHEDULE);
+}
+
+static spinlock_t migration_lock = SPIN_LOCK_UNLOCKED;
+static task_t *new_task;
+
+
+/*
+ * This function sends a 'task migration' IPI to another CPU.
+ * Must be called from syscall contexts, with interrupts *enabled*.
+ */
+void smp_migrate_task(int cpu, task_t *p)
+{
+	/*
+	 * The target CPU will unlock the migration spinlock:
+	 */
+	spin_lock(&migration_lock);
+	new_task = p;
+	platform_send_ipi(cpu, IA64_TASK_MIGRATION, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ * Task migration callback.
+ */
+asmlinkage void smp_task_migration_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	task_t *p;
+
+	p = new_task;
+	spin_unlock(&migration_lock);
+	sched_task_migrated(p);
+}
+
+void
 smp_flush_tlb_all (void)
 {
 	smp_call_function ((void (*)(void *))__flush_tlb_all,0,1,1);
 	__flush_tlb_all();
 }
+
+void
+smp_flush_tlb_all_nowait (void)
+{
+	smp_call_function_nowait ((void (*)(void *))__flush_tlb_all,0);
+	__flush_tlb_all();
+}
 
 /*
  * Run a function on another CPU
  *	The function to run. This must be fast and non-blocking.
@@ -305,6 +376,42 @@
 	spin_unlock_bh(&call_lock);
 	return 0;
 }
+
+/*
+ * [SUMMARY] Run a function on all other CPUs.
+ *	The function to run. This must be fast and non-blocking.
+ *	An arbitrary pointer to pass to the function.
+ * [RETURNS] 0
+ *
+ * Returns immediately. Data is stored in nowait_data where it has chances
+ * to live long enough...
+ * Used by smp_flush_tlb_all_nowait() and avoids a race condition with the
+ * O(1) MQ scheduler (wrap_mmu_context while another CPU waits for the
+ * runqueue lock of the processor doing context wrap). [EF]
+ */
+int
+smp_call_function_nowait (void (*func) (void *info), void *info)
+{
+	int cpus = smp_num_cpus-1;
+
+	if (!cpus)
+		return 0;
+
+	if (atomic_read(&nowait_data.started) > 0) {
+		printk("smp_call_function_nowait: called before previous call executed on all CPUs!\n");
+	}
+
+	nowait_data.func = func;
+	nowait_data.info = info;
+	atomic_set(&nowait_data.started, cpus);
+	nowait_data.wait = 0;
+
+	spin_lock_bh(&call_lock);
+	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC_NOWAIT */
+	send_IPI_allbutself(IPI_CALL_FUNC_NOWAIT);
+	spin_unlock_bh(&call_lock);
+	return 0;
+}
 
 void
 smp_do_timer (struct pt_regs *regs)
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/smpboot.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/smpboot.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/smpboot.c	Thu Jan 31 16:42:26 2002
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/smpboot.c	Fri Feb 1 14:27:50 2002
@@ -356,6 +356,7 @@
 	local_irq_enable();
 	calibrate_delay();
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+	ia64_disable_timer();
 	/*
 	 * Allow the master to continue.
 	 */
@@ -379,7 +380,8 @@
 	Dprintk("CPU %d is set to go.\n", smp_processor_id());
 	while (!atomic_read(&smp_commenced))
 		;
-
+	/* reenable timer interrupts */
+	ia64_cpu_local_tick();
 	Dprintk("CPU %d is starting idle.\n", smp_processor_id());
 	return cpu_idle();
 }
@@ -416,11 +418,10 @@
 	if (!idle)
 		panic("No idle process for CPU %d", cpu);
 
-	task_set_cpu(idle, cpu);	/* we schedule the first task manually */
+	init_idle(idle, cpu);
 
 	ia64_cpu_to_sapicid[cpu] = sapicid;
 
-	del_from_runqueue(idle);
 	unhash_process(idle);
 	init_tasks[cpu] = idle;
 
@@ -481,8 +482,7 @@
 	printk("Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
 
 	global_irq_holder = 0;
-	current->processor = 0;
-	init_idle();
+	current->cpu = 0;
 
 	/*
 	 * If SMP should be disabled, then really disable it!
@@ -569,3 +569,9 @@
 		smp_num_cpus = 1;
 	}
 }
+
+/* Number of ticks we consider an idle task still cache-hot.
+ * For Itanium: with 1GB/s bandwidth we need 4ms to fill up 4MB L3 cache...
+ * So let's try 10 ticks.
+ */
+unsigned long cache_decay_ticks=10;
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/kernel/time.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/time.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/kernel/time.c	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/kernel/time.c	Thu Jan 31 18:41:56 2002
@@ -209,7 +209,7 @@
 /*
  * Encapsulate access to the itm structure for SMP.
  */
-void __init
+void
 ia64_cpu_local_tick (void)
 {
 	int cpu = smp_processor_id();
@@ -298,3 +298,9 @@
 	efi_gettimeofday((struct timeval *) &xtime);
 	ia64_init_itm();
 }
+
+void __init ia64_disable_timer(void)
+{
+	ia64_set_itv(IA64_TIMER_VECTOR | IA64_TIMER_MASK);
+}
+
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/mm/fault.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/mm/fault.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/mm/fault.c	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/mm/fault.c	Thu Jan 31 18:40:40 2002
@@ -194,8 +194,7 @@
   out_of_memory:
 	up_read(&mm->mmap_sem);
 	if (current->pid == 1) {
-		current->policy |= SCHED_YIELD;
-		schedule();
+		yield();
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/mm/tlb.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/mm/tlb.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/mm/tlb.c	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/mm/tlb.c	Fri Feb 1 14:27:08 2002
@@ -76,7 +76,8 @@
 		ia64_ctx.limit = tsk_context;
 	}
 	read_unlock(&tasklist_lock);
-	flush_tlb_all();
+	//flush_tlb_all();	/* potential race condition with O(1) scheduler [EF] */
+	smp_flush_tlb_all_nowait();
 }
 
 static inline void
diff -urN 2.4.17-IA64-kdb-J9/arch/ia64/tools/print_offsets.c 2.4.17-IA64-kdb-J9ia64/arch/ia64/tools/print_offsets.c
--- 2.4.17-IA64-kdb-J9/arch/ia64/tools/print_offsets.c	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/arch/ia64/tools/print_offsets.c	Thu Jan 31 18:40:40 2002
@@ -54,7 +54,7 @@
     { "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
     { "IA64_TASK_SIGPENDING_OFFSET", offsetof (struct task_struct, sigpending) },
     { "IA64_TASK_NEED_RESCHED_OFFSET", offsetof (struct task_struct, need_resched) },
-    { "IA64_TASK_PROCESSOR_OFFSET", offsetof (struct task_struct, processor) },
+    { "IA64_TASK_PROCESSOR_OFFSET", offsetof (struct task_struct, cpu) },
     { "IA64_TASK_THREAD_OFFSET", offsetof (struct task_struct, thread) },
     { "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
 #ifdef CONFIG_PERFMON
diff -urN 2.4.17-IA64-kdb-J9/include/asm-ia64/bitops.h 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/bitops.h
--- 2.4.17-IA64-kdb-J9/include/asm-ia64/bitops.h	Thu Jan 31 16:42:28 2002
+++ 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/bitops.h	Fri Feb 1 14:39:33 2002
@@ -280,6 +280,20 @@
 	return result;
 }
 
+/**
+ * __ffs - find first bit in a 64 bit long.
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long
+__ffs (unsigned long x)
+{
+	unsigned long result;
+
+	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (~x & (x - 1)));
+	return result;
+}
+
 #ifdef __KERNEL__
 
 /*
@@ -357,6 +371,8 @@
 		tmp = *p;
 found_first:
 	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
 found_middle:
 	return result + ffz(tmp);
 }
@@ -366,8 +382,52 @@
  */
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 
+/*
+ * Find next bit in a bitmap reasonably efficiently..
+ */
+static inline int
+find_next_bit (void *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= ~0UL << offset;
+		if (size < 64)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+found_first:
+	tmp &= ~0UL >> (64-size);
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
 #ifdef __KERNEL__
+#define __clear_bit(nr, addr) clear_bit(nr, addr)
 
 #define ext2_set_bit		test_and_set_bit
 #define ext2_clear_bit		test_and_clear_bit
 #define ext2_test_bit		test_bit
diff -urN 2.4.17-IA64-kdb-J9/include/asm-ia64/hw_irq.h 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/hw_irq.h
--- 2.4.17-IA64-kdb-J9/include/asm-ia64/hw_irq.h	Tue Jul 31 19:30:09 2001
+++ 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/hw_irq.h	Fri Feb 1 14:39:33 2002
@@ -49,6 +49,7 @@
 #define IA64_PERFMON_VECTOR		0xee	/* performanc monitor interrupt vector */
 #define IA64_TIMER_VECTOR		0xef	/* use highest-prio group 15 interrupt for timer */
 #define IA64_MCA_WAKEUP_VECTOR		0xf0	/* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_TASK_MIGRATION		0xfb	/* task migration interrupt vector */
 #define IA64_IPI_RESCHEDULE		0xfd	/* SMP reschedule */
 #define IA64_IPI_VECTOR			0xfe	/* inter-processor interrupt vector */
 
@@ -65,6 +66,9 @@
 	IA64_IPI_DM_EXTINT = 0x7,	/* pend an 8259-compatible interrupt. */
 };
 
+/* bit for masking and discarding timer interrupts on IA64 */
+#define IA64_TIMER_MASK		(1<<16)
+
 extern __u8 isa_irq_to_vector_map[16];
 #define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]
 
diff -urN 2.4.17-IA64-kdb-J9/include/asm-ia64/mmu_context.h 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/mmu_context.h
--- 2.4.17-IA64-kdb-J9/include/asm-ia64/mmu_context.h	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/mmu_context.h	Fri Feb 1 14:39:33 2002
@@ -118,6 +118,22 @@
 	reload_context(next);
 }
 
+/*
+ * Needed for the O(1) MQ scheduler.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO >= 192
+# error update this function. */
+#endif
+
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return 64 + __ffs(b[1]);
+	return __ffs(b[2]) + 128;
+}
+
 #define switch_mm(prev_mm,next_mm,next_task,cpu)	activate_mm(prev_mm, next_mm)
 
 # endif /* ! __ASSEMBLY__ */
diff -urN 2.4.17-IA64-kdb-J9/include/asm-ia64/smp.h 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/smp.h
--- 2.4.17-IA64-kdb-J9/include/asm-ia64/smp.h	Fri Nov 9 23:26:17 2001
+++ 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/smp.h	Fri Feb 1 14:39:33 2002
@@ -27,7 +27,7 @@
 #define SMP_IRQ_REDIRECTION	(1 << 0)
 #define SMP_IPI_REDIRECTION	(1 << 1)
 
-#define smp_processor_id()	(current->processor)
+#define smp_processor_id()	(current->cpu)
 
 extern struct smp_boot_data {
 	int cpu_count;
@@ -48,6 +48,9 @@
 
 extern unsigned long ap_wakeup_vector;
 
+extern void smp_send_reschedule(int cpu);
+extern void smp_send_reschedule_all(void);
+
 /*
  * Function to map hard smp processor id to logical id.  Slow, so
  * don't use this in performance-critical code.
@@ -110,12 +113,6 @@
 
 #define NO_PROC_ID		0xffffffff	/* no processor magic marker */
 
-/*
- * Extra overhead to move a task from one cpu to another (due to TLB and cache misses).
- * Expressed in "negative nice value" units (larger number means higher priority/penalty).
- */
-#define PROC_CHANGE_PENALTY	20
-
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
diff -urN 2.4.17-IA64-kdb-J9/include/asm-ia64/spinlock.h 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/spinlock.h
--- 2.4.17-IA64-kdb-J9/include/asm-ia64/spinlock.h	Thu Jan 31 16:42:28 2002
+++ 2.4.17-IA64-kdb-J9ia64/include/asm-ia64/spinlock.h	Fri Feb 1 14:39:33 2002
@@ -84,7 +84,7 @@
 		"mov r29 = 1\n"					\
 		";;\n"						\
 		"1:\n"						\
-		"ld4.bias r2 = [%0]\n"				\
+		"ld4 r2 = [%0]\n"				\
 		";;\n"						\
 		"cmp4.eq p0,p7 = r0,r2\n"			\
 		"(p7) br.cond.spnt.few 1b \n"			\
diff -urN 2.4.17-IA64-kdb-J9/include/linux/smp.h 2.4.17-IA64-kdb-J9ia64/include/linux/smp.h
--- 2.4.17-IA64-kdb-J9/include/linux/smp.h	Thu Jan 31 18:39:37 2002
+++ 2.4.17-IA64-kdb-J9ia64/include/linux/smp.h	Fri Feb 1 14:39:33 2002
@@ -24,12 +24,6 @@
 extern void smp_send_stop(void);
 
 /*
- * sends a 'reschedule' event to another CPU:
- */
-extern void FASTCALL(smp_send_reschedule(int cpu));
-
-
-/*
  * Boot processor call to load the other CPU's
  */
 extern void smp_boot_cpus(void);
diff -urN 2.4.17-IA64-kdb-J9/kdb/kdbmain.c 2.4.17-IA64-kdb-J9ia64/kdb/kdbmain.c
--- 2.4.17-IA64-kdb-J9/kdb/kdbmain.c	Thu Jan 31 16:42:04 2002
+++ 2.4.17-IA64-kdb-J9ia64/kdb/kdbmain.c	Thu Jan 31 18:40:40 2002
@@ -2360,7 +2360,7 @@
 	for_each_task(p) {
 		kdb_printf("0x%p %08d %08d %1.1d %3.3d %s 0x%p%c%s\n",
 			   (void *)p, p->pid, p->p_pptr->pid,
-			   task_has_cpu(p), p->processor,
+			   task_has_cpu(p), p->cpu,
 			   (p->state == 0)?"run ":(p->state>0)?"stop":"unrn",
 			   (void *)(&p->thread),
 			   (p == current) ? '*': ' ',
diff -urN 2.4.17-IA64-kdb-J9/kernel/printk.c 2.4.17-IA64-kdb-J9ia64/kernel/printk.c
--- 2.4.17-IA64-kdb-J9/kernel/printk.c	Thu Jan 31 18:39:37 2002
+++ 2.4.17-IA64-kdb-J9ia64/kernel/printk.c	Thu Jan 31 18:40:40 2002
@@ -25,6 +25,8 @@
 #include
 #include
 #include			/* For in_interrupt() */
+#include
+#include
 
 #include
 
diff -urN 2.4.17-IA64-kdb-J9/kernel/sched.c 2.4.17-IA64-kdb-J9ia64/kernel/sched.c
--- 2.4.17-IA64-kdb-J9/kernel/sched.c	Thu Jan 31 18:39:37 2002
+++ 2.4.17-IA64-kdb-J9ia64/kernel/sched.c	Fri Feb 1 16:06:40 2002
@@ -93,6 +93,15 @@
 	p->array = array;
 }
 
+/* needed on IA64, arch/ia64/kernel/head.S relies on it (EF) */
+struct task_struct * init_tasks[NR_CPUS] __initdata = {&init_task, };
+
+/* needed in kdb (EF) */
+int task_has_cpu(task_t *p)
+{
+	return (p == task_rq(p)->curr);
+}
+
 /*
  * A task is 'heavily interactive' if it either has reached the
  * bottom 25% of the SCHED_OTHER priority range, or if it is below
@@ -859,16 +868,16 @@
 	p->cpus_allowed = new_mask;
 
 	/*
-	 * Can the task run on the current CPU? If not then
+	 * Can the task run on its current CPU? If not then
 	 * migrate the process off to a proper CPU.
 	 */
-	if (new_mask & (1UL << smp_processor_id()))
+	if (new_mask & (1UL << p->cpu))
 		return;
 #if CONFIG_SMP
-	current->state = TASK_UNINTERRUPTIBLE;
-	smp_migrate_task(__ffs(new_mask), current);
-
-	schedule();
+	p->state = TASK_UNINTERRUPTIBLE;
+	smp_migrate_task(__ffs(new_mask), p);
+	if (p == current)
+		schedule();
 #endif
 }
diff -urN 2.4.17-IA64-kdb-J9/kernel/timer.c 2.4.17-IA64-kdb-J9ia64/kernel/timer.c
--- 2.4.17-IA64-kdb-J9/kernel/timer.c	Thu Jan 31 18:39:37 2002
+++ 2.4.17-IA64-kdb-J9ia64/kernel/timer.c	Thu Jan 31 18:40:40 2002
@@ -585,17 +585,16 @@
 	update_one_process(p, user_tick, system, cpu);
 	if (p->pid) {
-		if (--p->counter <= 0) {
-			p->counter = 0;
-			p->need_resched = 1;
-		}
-		if (p->nice > 0)
+		if (p->__nice > 0)
 			kstat.per_cpu_nice[cpu] += user_tick;
 		else
 			kstat.per_cpu_user[cpu] += user_tick;
 		kstat.per_cpu_system[cpu] += system;
-	} else if (really_local_bh_count() || really_local_irq_count() > 1)
-		kstat.per_cpu_system[cpu] += system;
+	} else {
+		if (really_local_bh_count() || really_local_irq_count() > 1)
+			kstat.per_cpu_system[cpu] += system;
+	}
+	scheduler_tick(p);
 }
 
 /*