public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [cpumask_t 1/3] core changes for 2.5.67-bk6
@ 2003-04-15 22:50 William Lee Irwin III
  2003-04-15 22:58 ` [cpumask_t 2/3] i386 " William Lee Irwin III
  2003-04-18 17:20 ` [cpumask_t 1/3] core " Randy.Dunlap
  0 siblings, 2 replies; 5+ messages in thread
From: William Lee Irwin III @ 2003-04-15 22:50 UTC (permalink / raw)
  To: linux-kernel

Core changes for extended cpu masks. Basically use a machine word
#if NR_CPUS < BITS_PER_LONG, otherwise, use a structure with an array
of unsigned longs for it. Sprinkle it around the scheduler and a few
other odd places that play with the cpu bitmasks. Back-ended by a
bitmap ADT capable of dealing with arbitrary-width bitmaps, with the
obvious micro-optimizations for NR_CPUS < BITS_PER_LONG and UP.

NR_CPUS % BITS_PER_LONG != 0 is invalid when NR_CPUS > BITS_PER_LONG.

cpus_weight(), cpus_shift_left(), cpus_shift_right(), and
cpus_complement() are from Martin Hicks.


diff -urpN linux-2.5.67-bk6/drivers/base/node.c cpu-2.5.67-bk6-1/drivers/base/node.c
--- linux-2.5.67-bk6/drivers/base/node.c	2003-04-07 10:30:43.000000000 -0700
+++ cpu-2.5.67-bk6-1/drivers/base/node.c	2003-04-15 14:39:40.000000000 -0700
@@ -7,7 +7,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/node.h>
-
+#include <linux/cpumask.h>
 #include <asm/topology.h>
 
 
@@ -31,7 +31,16 @@ struct device_driver node_driver = {
 static ssize_t node_read_cpumap(struct device * dev, char * buf)
 {
 	struct node *node_dev = to_node(to_root(dev));
-        return sprintf(buf,"%lx\n",node_dev->cpumap);
+	cpumask_t tmp = node_dev->cpumap;
+	int k, len = 0;
+
+	for (k = 0; k < CPU_ARRAY_SIZE; ++k) {
+        	int j = sprintf(buf,"%lx\n", cpus_coerce(tmp));
+		len += j;
+		buf += j;
+		cpus_shift_right(tmp, tmp, BITS_PER_LONG);
+	}
+	return len;
 }
 static DEVICE_ATTR(cpumap,S_IRUGO,node_read_cpumap,NULL);
 
diff -urpN linux-2.5.67-bk6/include/linux/bitmap.h cpu-2.5.67-bk6-1/include/linux/bitmap.h
--- linux-2.5.67-bk6/include/linux/bitmap.h	1969-12-31 16:00:00.000000000 -0800
+++ cpu-2.5.67-bk6-1/include/linux/bitmap.h	2003-04-15 14:39:40.000000000 -0700
@@ -0,0 +1,131 @@
+#ifndef __LINUX_BITMAP_H
+#define __LINUX_BITMAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+
+static inline int bitmap_empty(volatile unsigned long *bitmap, int bits)
+{
+	int k;
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		if (bitmap[k])
+			return 0;
+
+	return 1;
+}
+
+static inline int bitmap_full(volatile unsigned long *bitmap, int bits)
+{
+	int k;
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		if (~bitmap[k])
+			return 0;
+
+	return 1;
+}
+
+static inline int bitmap_equal(volatile unsigned long *bitmap1, volatile unsigned long *bitmap2, int bits)
+{
+	int k;
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		if (bitmap1[k] != bitmap2[k])
+			return 0;
+
+	return 1;
+}
+
+static inline void bitmap_complement(volatile unsigned long *bitmap, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		bitmap[k] = ~bitmap[k];
+}
+
+static inline void bitmap_clear(volatile unsigned long *bitmap, int bits)
+{
+	CLEAR_BITMAP((unsigned long *)bitmap, bits);
+}
+
+static inline void bitmap_fill(volatile unsigned long *bitmap, int bits)
+{
+	memset((unsigned long *)bitmap, 0xff, BITS_TO_LONGS(bits)*sizeof(unsigned long));
+}
+
+static inline void bitmap_copy(volatile unsigned long *dst, volatile unsigned long *src, int bits)
+{
+	memcpy((unsigned long *)dst, (unsigned long *)src, BITS_TO_LONGS(bits)*sizeof(unsigned long));
+}
+
+static inline void bitmap_shift_left(volatile unsigned long *,volatile unsigned long *,int,int);
+static inline void bitmap_shift_right(volatile unsigned long *dst, volatile unsigned long *src, int shift, int bits)
+{
+	int k;
+	DECLARE_BITMAP(__shr_tmp, bits);
+
+	bitmap_clear(__shr_tmp, bits);
+	for (k = 0; k < bits - shift; ++k)
+		if (test_bit(k + shift, src))
+			set_bit(k, __shr_tmp);
+	bitmap_copy(dst, __shr_tmp, bits);
+}
+
+static inline void bitmap_shift_left(volatile unsigned long *dst, volatile unsigned long *src, int shift, int bits)
+{
+	int k;
+	DECLARE_BITMAP(__shl_tmp, bits);
+
+	bitmap_clear(__shl_tmp, bits);
+	for (k = bits - 1; k >= shift; --k)
+		if (test_bit(k - shift, src))
+			set_bit(k, __shl_tmp);
+	bitmap_copy(dst, __shl_tmp, bits);
+}
+
+static inline void bitmap_and(volatile unsigned long *dst, volatile unsigned long *bitmap1, volatile unsigned long *bitmap2, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		dst[k] = bitmap1[k] & bitmap2[k];
+}
+
+static inline void bitmap_or(volatile unsigned long *dst, volatile unsigned long *bitmap1, volatile unsigned long *bitmap2, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		dst[k] = bitmap1[k] | bitmap2[k];
+}
+
+#if BITS_PER_LONG == 32
+static inline int bitmap_weight(volatile unsigned long *bitmap, int bits)
+{
+	int k, w = 0;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		w += hweight32(bitmap[k]);
+
+	return w;
+}
+#elif BITS_PER_LONG == 64
+static inline int bitmap_weight(volatile unsigned long *bitmap, int bits)
+{
+	int k, w = 0;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		w += hweight64(bitmap[k]);
+
+	return w;
+}
+#endif
+
+#endif
+
+#endif /* __LINUX_BITMAP_H */
diff -urpN linux-2.5.67-bk6/include/linux/cpumask.h cpu-2.5.67-bk6-1/include/linux/cpumask.h
--- linux-2.5.67-bk6/include/linux/cpumask.h	1969-12-31 16:00:00.000000000 -0800
+++ cpu-2.5.67-bk6-1/include/linux/cpumask.h	2003-04-15 14:39:40.000000000 -0700
@@ -0,0 +1,112 @@
+#ifndef __LINUX_CPUMASK_H
+#define __LINUX_CPUMASK_H
+
+#define CPU_ARRAY_SIZE		BITS_TO_LONGS(NR_CPUS)
+
+#if NR_CPUS > BITS_PER_LONG
+
+#include <linux/bitmap.h>
+
+struct cpumask
+{
+	unsigned long mask[CPU_ARRAY_SIZE];
+};
+
+typedef struct cpumask cpumask_t;
+
+#define cpu_set(cpu, map)		set_bit(cpu, (map).mask)
+#define cpu_clear(cpu, map)		clear_bit(cpu, (map).mask)
+#define cpu_isset(cpu, map)		test_bit(cpu, (map).mask)
+#define cpu_test_and_set(cpu, map)	test_and_set_bit(cpu, (map).mask)
+
+#define cpus_and(dst,src1,src2)	bitmap_and((dst).mask,(src1).mask, (src2).mask, NR_CPUS)
+#define cpus_or(dst,src1,src2)	bitmap_or((dst).mask, (src1).mask, (src2).mask, NR_CPUS)
+#define cpus_clear(map)		bitmap_clear((map).mask, NR_CPUS)
+#define cpus_complement(map)	bitmap_complement((map).mask, NR_CPUS)
+#define cpus_equal(map1, map2)	bitmap_equal((map1).mask, (map2).mask, NR_CPUS)
+#define cpus_empty(map)		bitmap_empty((map).mask, NR_CPUS)
+#define cpus_weight(map)		bitmap_weight((map).mask, NR_CPUS)
+#define cpus_shift_right(d, s, n)	bitmap_shift_right((d).mask, (s).mask, n, NR_CPUS)
+#define cpus_shift_left(d, s, n)	bitmap_shift_left((d).mask, (s).mask, n, NR_CPUS)
+#define first_cpu(map)		find_first_bit((map).mask, NR_CPUS)
+#define next_cpu(cpu, map)	find_next_bit((map).mask, NR_CPUS, (cpu) + 1)
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_coerce(map)	((map).mask[0])
+#define any_online_cpu(map)	find_first_bit((map).mask, NR_CPUS)
+
+/*
+ * um, these need to be usable as static initializers
+ */
+#define CPU_MASK_ALL	{ {[0 ... CPU_ARRAY_SIZE-1] = ~0UL} }
+#define CPU_MASK_NONE	{ {[0 ... CPU_ARRAY_SIZE-1] =  0UL} }
+
+#else /* NR_CPUS <= BITS_PER_LONG */
+
+typedef unsigned long cpumask_t;
+
+#define cpu_set(cpu, map)		do { map |= 1UL << (cpu); } while (0)
+#define cpu_clear(cpu, map)		do { map &= ~(1UL << (cpu)); } while (0)
+#define cpu_isset(cpu, map)		((map) & (1UL << (cpu)))
+#define cpu_test_and_set(cpu, map)	test_and_set_bit(cpu, &(map))
+
+#define cpus_and(dst,src1,src2)		do { dst = (src1) & (src2); } while (0)
+#define cpus_or(dst,src1,src2)		do { dst = (src1) | (src2); } while (0)
+#define cpus_clear(map)			do { map = 0UL; } while (0)
+#define cpus_complement(map)		do { map = ~(map); } while (0)
+#define cpus_equal(map1, map2)		((map1) == (map2))
+#define cpus_empty(map)			((map) == 0UL)
+
+#if BITS_PER_LONG == 32
+#define cpus_weight(map)		hweight32(map)
+#elif BITS_PER_LONG == 64
+#define cpus_weight(map)		hweight64(map)
+#endif
+
+#define cpus_shift_right(dst, src, n)	do { dst = (src) >> (n); } while (0)
+#define cpus_shift_left(dst, src, n)	do { dst = (src) << (n); } while (0)
+
+#define any_online_cpu(map)		first_cpu(map)
+
+#ifdef CONFIG_SMP
+#define first_cpu(map)			__ffs(map)
+#define next_cpu(cpu, map)		__ffs((map) & ~((2UL << (cpu)) - 1))
+#define CPU_MASK_ALL	~0UL
+#define CPU_MASK_NONE	0UL
+#else /* UP */
+#define first_cpu(map)			0
+#define next_cpu(cpu, map)		1
+#define CPU_MASK_ALL	1UL
+#define CPU_MASK_NONE	0UL
+#endif
+
+/* only ever use this for things that are _never_ used on large boxen */
+#define cpus_coerce(map)		(map)
+
+#endif /* NR_CPUS <= BITS_PER_LONG */
+
+#ifdef CONFIG_SMP
+extern cpumask_t cpu_online_map;
+
+#define num_online_cpus()		cpus_weight(cpu_online_map)
+#define cpu_online(cpu)			cpu_isset(cpu, cpu_online_map)
+#else
+#define	cpu_online_map			0x1UL
+#define num_online_cpus()		1
+#define cpu_online(cpu)			({ BUG_ON((cpu) != 0); 1; })
+#endif
+
+static inline int next_online_cpu(int cpu, cpumask_t map)
+{
+	do
+		cpu = next_cpu(cpu, map);
+	while (cpu < NR_CPUS && !cpu_online(cpu));
+	return cpu;
+}
+
+#define for_each_cpu(cpu, map)						\
+	for (cpu = first_cpu(map); cpu < NR_CPUS; cpu = next_cpu(cpu,map))
+#define for_each_online_cpu(cpu, map)					\
+	for (cpu = first_cpu(map); cpu < NR_CPUS; cpu = next_online_cpu(cpu,map))
+
+#endif /* __LINUX_CPUMASK_H */
diff -urpN linux-2.5.67-bk6/include/linux/init_task.h cpu-2.5.67-bk6-1/include/linux/init_task.h
--- linux-2.5.67-bk6/include/linux/init_task.h	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/linux/init_task.h	2003-04-15 14:39:40.000000000 -0700
@@ -68,7 +68,7 @@
 	.prio		= MAX_PRIO-20,					\
 	.static_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
-	.cpus_allowed	= ~0UL,						\
+	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
diff -urpN linux-2.5.67-bk6/include/linux/irq.h cpu-2.5.67-bk6-1/include/linux/irq.h
--- linux-2.5.67-bk6/include/linux/irq.h	2003-04-07 10:31:07.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/linux/irq.h	2003-04-15 14:39:40.000000000 -0700
@@ -44,7 +44,7 @@ struct hw_interrupt_type {
 	void (*disable)(unsigned int irq);
 	void (*ack)(unsigned int irq);
 	void (*end)(unsigned int irq);
-	void (*set_affinity)(unsigned int irq, unsigned long mask);
+	void (*set_affinity)(unsigned int irq, unsigned long dest);
 };
 
 typedef struct hw_interrupt_type  hw_irq_controller;
diff -urpN linux-2.5.67-bk6/include/linux/node.h cpu-2.5.67-bk6-1/include/linux/node.h
--- linux-2.5.67-bk6/include/linux/node.h	2003-04-07 10:32:51.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/linux/node.h	2003-04-15 14:39:40.000000000 -0700
@@ -20,9 +20,10 @@
 #define _LINUX_NODE_H_
 
 #include <linux/device.h>
+#include <linux/cpumask.h>
 
 struct node {
-	unsigned long cpumap;	/* Bitmap of CPUs on the Node */
+	cpumask_t cpumap;	/* Bitmap of CPUs on the Node */
 	struct sys_root sysroot;
 };
 
diff -urpN linux-2.5.67-bk6/include/linux/rcupdate.h cpu-2.5.67-bk6-1/include/linux/rcupdate.h
--- linux-2.5.67-bk6/include/linux/rcupdate.h	2003-04-07 10:30:58.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/linux/rcupdate.h	2003-04-15 14:39:40.000000000 -0700
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
+#include <linux/cpumask.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -67,7 +68,7 @@ struct rcu_ctrlblk {
 	spinlock_t	mutex;		/* Guard this struct                  */
 	long		curbatch;	/* Current batch number.	      */
 	long		maxbatch;	/* Max requested batch number.        */
-	unsigned long	rcu_cpu_mask; 	/* CPUs that need to switch in order  */
+	cpumask_t	rcu_cpu_mask; 	/* CPUs that need to switch in order  */
 					/* for current batch to proceed.      */
 };
 
@@ -114,7 +115,7 @@ static inline int rcu_pending(int cpu) 
 	     rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
 	    (list_empty(&RCU_curlist(cpu)) &&
 			 !list_empty(&RCU_nxtlist(cpu))) ||
-	    test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+	    cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
 		return 1;
 	else
 		return 0;
diff -urpN linux-2.5.67-bk6/include/linux/sched.h cpu-2.5.67-bk6-1/include/linux/sched.h
--- linux-2.5.67-bk6/include/linux/sched.h	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/linux/sched.h	2003-04-15 14:39:41.000000000 -0700
@@ -12,6 +12,7 @@
 #include <linux/jiffies.h>
 #include <linux/rbtree.h>
 #include <linux/thread_info.h>
+#include <linux/cpumask.h>
 
 #include <asm/system.h>
 #include <asm/semaphore.h>
@@ -197,7 +198,7 @@ struct mm_struct {
 	unsigned long arg_start, arg_end, env_start, env_end;
 	unsigned long rss, total_vm, locked_vm;
 	unsigned long def_flags;
-	unsigned long cpu_vm_mask;
+	cpumask_t cpu_vm_mask;
 	unsigned long swap_address;
 
 	unsigned dumpable:1;
@@ -331,7 +332,7 @@ struct task_struct {
 	unsigned long last_run;
 
 	unsigned long policy;
-	unsigned long cpus_allowed;
+	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
 	struct list_head tasks;
@@ -467,7 +468,7 @@ do { if (atomic_dec_and_test(&(tsk)->usa
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 
 #if CONFIG_SMP
-extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern void set_cpus_allowed(task_t *p, cpumask_t new_mask);
 #else
 # define set_cpus_allowed(p, new_mask) do { } while (0)
 #endif
diff -urpN linux-2.5.67-bk6/include/linux/smp.h cpu-2.5.67-bk6-1/include/linux/smp.h
--- linux-2.5.67-bk6/include/linux/smp.h	2003-04-07 10:31:44.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/linux/smp.h	2003-04-15 14:39:41.000000000 -0700
@@ -115,9 +115,6 @@ void smp_prepare_boot_cpu(void);
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
 static inline void smp_send_reschedule_all(void) { }
-#define cpu_online_map				1
-#define cpu_online(cpu)				({ BUG_ON((cpu) != 0); 1; })
-#define num_online_cpus()			1
 #define num_booting_cpus()			1
 #define cpu_possible(cpu)			({ BUG_ON((cpu) != 0); 1; })
 #define smp_prepare_boot_cpu()			do {} while (0)
diff -urpN linux-2.5.67-bk6/kernel/fork.c cpu-2.5.67-bk6-1/kernel/fork.c
--- linux-2.5.67-bk6/kernel/fork.c	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/kernel/fork.c	2003-04-15 14:39:41.000000000 -0700
@@ -257,7 +257,7 @@ static inline int dup_mmap(struct mm_str
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->map_count = 0;
 	mm->rss = 0;
-	mm->cpu_vm_mask = 0;
+	cpus_clear(mm->cpu_vm_mask);
 	pprev = &mm->mmap;
 
 	/*
diff -urpN linux-2.5.67-bk6/kernel/module.c cpu-2.5.67-bk6-1/kernel/module.c
--- linux-2.5.67-bk6/kernel/module.c	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/kernel/module.c	2003-04-15 14:39:41.000000000 -0700
@@ -300,6 +300,7 @@ static int stopref(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
+	cpumask_t allowed_mask = CPU_MASK_NONE;
 
 	sprintf(current->comm, "kmodule%lu\n", (unsigned long)cpu);
 
@@ -308,7 +309,8 @@ static int stopref(void *cpu)
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	setscheduler(current->pid, SCHED_FIFO, &param);
 #endif
-	set_cpus_allowed(current, 1UL << (unsigned long)cpu);
+	cpu_set((int)cpu, allowed_mask);
+	set_cpus_allowed(current, allowed_mask);
 
 	/* Ack: we are alive */
 	atomic_inc(&stopref_thread_ack);
@@ -361,7 +363,7 @@ static void stopref_set_state(enum stopr
 static int stop_refcounts(void)
 {
 	unsigned int i, cpu;
-	unsigned long old_allowed;
+	cpumask_t old_allowed, allowed_mask = CPU_MASK_NONE;
 	int ret = 0;
 
 	/* One thread per cpu.  We'll do our own. */
@@ -369,7 +371,8 @@ static int stop_refcounts(void)
 
 	/* FIXME: racy with set_cpus_allowed. */
 	old_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, 1UL << (unsigned long)cpu);
+	cpu_set(cpu, allowed_mask);
+	set_cpus_allowed(current, allowed_mask);
 
 	atomic_set(&stopref_thread_ack, 0);
 	stopref_num_threads = 0;
diff -urpN linux-2.5.67-bk6/kernel/rcupdate.c cpu-2.5.67-bk6-1/kernel/rcupdate.c
--- linux-2.5.67-bk6/kernel/rcupdate.c	2003-04-07 10:30:34.000000000 -0700
+++ cpu-2.5.67-bk6-1/kernel/rcupdate.c	2003-04-15 14:39:41.000000000 -0700
@@ -47,7 +47,7 @@
 /* Definition for rcupdate control block. */
 struct rcu_ctrlblk rcu_ctrlblk = 
 	{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1, 
-	  .maxbatch = 1, .rcu_cpu_mask = 0 };
+	  .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
 DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
 
 /* Fake initialization required by compiler */
@@ -106,7 +106,7 @@ static void rcu_start_batch(long newbatc
 		rcu_ctrlblk.maxbatch = newbatch;
 	}
 	if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
-	    (rcu_ctrlblk.rcu_cpu_mask != 0)) {
+	    !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
 		return;
 	}
 	rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
@@ -121,7 +121,7 @@ static void rcu_check_quiescent_state(vo
 {
 	int cpu = smp_processor_id();
 
-	if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask)) {
+	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask)) {
 		return;
 	}
 
@@ -139,13 +139,13 @@ static void rcu_check_quiescent_state(vo
 	}
 
 	spin_lock(&rcu_ctrlblk.mutex);
-	if (!test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask)) {
+	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask)) {
 		spin_unlock(&rcu_ctrlblk.mutex);
 		return;
 	}
-	clear_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask);
+	cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
 	RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
-	if (rcu_ctrlblk.rcu_cpu_mask != 0) {
+	if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
 		spin_unlock(&rcu_ctrlblk.mutex);
 		return;
 	}
diff -urpN linux-2.5.67-bk6/kernel/sched.c cpu-2.5.67-bk6-1/kernel/sched.c
--- linux-2.5.67-bk6/kernel/sched.c	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/kernel/sched.c	2003-04-15 14:39:41.000000000 -0700
@@ -502,7 +502,7 @@ repeat_lock_task:
 			 */
 			if (unlikely(sync && !task_running(rq, p) &&
 				(task_cpu(p) != smp_processor_id()) &&
-				(p->cpus_allowed & (1UL << smp_processor_id())))) {
+				cpu_isset(smp_processor_id(), p->cpus_allowed))) {
 
 				set_task_cpu(p, smp_processor_id());
 				task_rq_unlock(rq, &flags);
@@ -776,13 +776,14 @@ static inline void double_rq_unlock(runq
  */
 static void sched_migrate_task(task_t *p, int dest_cpu)
 {
-	unsigned long old_mask;
+	cpumask_t old_mask, new_mask = CPU_MASK_NONE;
 
 	old_mask = p->cpus_allowed;
-	if (!(old_mask & (1UL << dest_cpu)))
+	if (!cpu_isset(dest_cpu, old_mask))
 		return;
 	/* force the process onto the specified CPU */
-	set_cpus_allowed(p, 1UL << dest_cpu);
+	cpu_set(dest_cpu, new_mask);
+	set_cpus_allowed(p, new_mask);
 
 	/* restore the cpus allowed mask */
 	set_cpus_allowed(p, old_mask);
@@ -795,7 +796,7 @@ static void sched_migrate_task(task_t *p
 static int sched_best_cpu(struct task_struct *p)
 {
 	int i, minload, load, best_cpu, node = 0;
-	unsigned long cpumask;
+	cpumask_t cpumask;
 
 	best_cpu = task_cpu(p);
 	if (cpu_rq(best_cpu)->nr_running <= 2)
@@ -813,7 +814,7 @@ static int sched_best_cpu(struct task_st
 	minload = 10000000;
 	cpumask = node_to_cpumask(node);
 	for (i = 0; i < NR_CPUS; ++i) {
-		if (!(cpumask & (1UL << i)))
+		if (!cpu_isset(i, cpumask))
 			continue;
 		if (cpu_rq(i)->nr_running < minload) {
 			best_cpu = i;
@@ -893,7 +894,7 @@ static inline unsigned int double_lock_b
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
  */
-static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, unsigned long cpumask)
+static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, cpumask_t cpumask)
 {
 	int nr_running, load, max_load, i;
 	runqueue_t *busiest, *rq_src;
@@ -928,7 +929,7 @@ static inline runqueue_t *find_busiest_q
 	busiest = NULL;
 	max_load = 1;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (!((1UL << i) & cpumask))
+		if (!cpu_isset(i, cpumask))
 			continue;
 
 		rq_src = cpu_rq(i);
@@ -1000,7 +1001,7 @@ static inline void pull_task(runqueue_t 
  * We call this with the current runqueue locked,
  * irqs disabled.
  */
-static void load_balance(runqueue_t *this_rq, int idle, unsigned long cpumask)
+static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask)
 {
 	int imbalance, idx, this_cpu = smp_processor_id();
 	runqueue_t *busiest;
@@ -1054,7 +1055,7 @@ skip_queue:
 #define CAN_MIGRATE_TASK(p,rq,this_cpu)					\
 	((jiffies - (p)->last_run > cache_decay_ticks) &&	\
 		!task_running(rq, p) &&					\
-			((p)->cpus_allowed & (1UL << (this_cpu))))
+			(cpu_isset(this_cpu, (p)->cpus_allowed)))
 
 	curr = curr->prev;
 
@@ -1097,10 +1098,10 @@ out:
 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
-	unsigned long cpumask, this_cpumask = 1UL << this_cpu;
 
 	if (node >= 0) {
-		cpumask = node_to_cpumask(node) | this_cpumask;
+		cpumask_t cpumask = node_to_cpumask(node);
+		cpu_set(this_cpu, cpumask);
 		spin_lock(&this_rq->lock);
 		load_balance(this_rq, idle, cpumask);
 		spin_unlock(&this_rq->lock);
@@ -1891,7 +1892,7 @@ out_unlock:
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	unsigned long new_mask;
+	cpumask_t new_mask;
 	int retval;
 	task_t *p;
 
@@ -1901,8 +1902,8 @@ asmlinkage long sys_sched_setaffinity(pi
 	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
 		return -EFAULT;
 
-	new_mask &= cpu_online_map;
-	if (!new_mask)
+	cpus_and(new_mask, new_mask, cpu_online_map);
+	if (cpus_empty(new_mask))
 		return -EINVAL;
 
 	read_lock(&tasklist_lock);
@@ -1944,7 +1945,7 @@ asmlinkage long sys_sched_getaffinity(pi
 				      unsigned long __user *user_mask_ptr)
 {
 	unsigned int real_len;
-	unsigned long mask;
+	cpumask_t mask;
 	int retval;
 	task_t *p;
 
@@ -1960,7 +1961,7 @@ asmlinkage long sys_sched_getaffinity(pi
 		goto out_unlock;
 
 	retval = 0;
-	mask = p->cpus_allowed & cpu_online_map;
+	cpus_and(mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -2293,16 +2294,15 @@ typedef struct {
  * task must not exit() & deallocate itself prematurely.  The
  * call is not atomic; no spinlocks may be held.
  */
-void set_cpus_allowed(task_t *p, unsigned long new_mask)
+void set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
 	unsigned long flags;
 	migration_req_t req;
 	runqueue_t *rq;
 
 #if 0 /* FIXME: Grab cpu_lock, return error on this case. --RR */
-	new_mask &= cpu_online_map;
-	if (!new_mask)
-		BUG();
+	cpus_and(new_mask, new_mask, cpu_online_map);
+	BUG_ON(cpus_empty(new_mask));
 #endif
 
 	rq = task_rq_lock(p, &flags);
@@ -2311,7 +2311,7 @@ void set_cpus_allowed(task_t *p, unsigne
 	 * Can the task run on the task's current CPU? If not then
 	 * migrate the thread off to a proper CPU.
 	 */
-	if (new_mask & (1UL << task_cpu(p))) {
+	if (cpu_isset(task_cpu(p), new_mask)) {
 		task_rq_unlock(rq, &flags);
 		return;
 	}
@@ -2320,7 +2320,7 @@ void set_cpus_allowed(task_t *p, unsigne
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->array && !task_running(rq, p)) {
-		set_task_cpu(p, __ffs(p->cpus_allowed));
+		set_task_cpu(p, first_cpu(p->cpus_allowed));
 		task_rq_unlock(rq, &flags);
 		return;
 	}
@@ -2345,6 +2345,7 @@ static int migration_thread(void * data)
 	int cpu = (long) data;
 	runqueue_t *rq;
 	int ret;
+	cpumask_t allowed_mask = CPU_MASK_NONE;
 
 	daemonize("migration/%d", cpu);
 	set_fs(KERNEL_DS);
@@ -2353,7 +2354,8 @@ static int migration_thread(void * data)
 	 * Either we are running on the right CPU, or there's a
 	 * a migration thread on the target CPU, guaranteed.
 	 */
-	set_cpus_allowed(current, 1UL << cpu);
+	cpu_set(cpu, allowed_mask);
+	set_cpus_allowed(current, allowed_mask);
 
 	ret = setscheduler(0, SCHED_FIFO, &param);
 
@@ -2381,7 +2383,11 @@ static int migration_thread(void * data)
 		spin_unlock_irqrestore(&rq->lock, flags);
 
 		p = req->task;
-		cpu_dest = __ffs(p->cpus_allowed & cpu_online_map);
+		{
+			cpumask_t tmp;
+			cpus_and(tmp, p->cpus_allowed, cpu_online_map);
+			cpu_dest = first_cpu(tmp);
+		}
 		rq_dest = cpu_rq(cpu_dest);
 repeat:
 		cpu_src = task_cpu(p);
diff -urpN linux-2.5.67-bk6/kernel/softirq.c cpu-2.5.67-bk6-1/kernel/softirq.c
--- linux-2.5.67-bk6/kernel/softirq.c	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/kernel/softirq.c	2003-04-15 14:39:41.000000000 -0700
@@ -308,15 +308,16 @@ void __init softirq_init(void)
 static int ksoftirqd(void * __bind_cpu)
 {
 	int cpu = (int) (long) __bind_cpu;
+	cpumask_t allowed_mask = CPU_MASK_NONE;
 
 	daemonize("ksoftirqd/%d", cpu);
 	set_user_nice(current, 19);
 	current->flags |= PF_IOTHREAD;
 
 	/* Migrate to the right CPU */
-	set_cpus_allowed(current, 1UL << cpu);
-	if (smp_processor_id() != cpu)
-		BUG();
+	cpu_set(cpu, allowed_mask);
+	set_cpus_allowed(current, allowed_mask);
+	BUG_ON(smp_processor_id() != cpu);
 
 	__set_current_state(TASK_INTERRUPTIBLE);
 	mb();
diff -urpN linux-2.5.67-bk6/kernel/workqueue.c cpu-2.5.67-bk6-1/kernel/workqueue.c
--- linux-2.5.67-bk6/kernel/workqueue.c	2003-04-15 14:38:03.000000000 -0700
+++ cpu-2.5.67-bk6-1/kernel/workqueue.c	2003-04-15 14:39:41.000000000 -0700
@@ -169,6 +169,7 @@ static int worker_thread(void *__startup
 	int cpu = cwq - cwq->wq->cpu_wq;
 	DECLARE_WAITQUEUE(wait, current);
 	struct k_sigaction sa;
+	cpumask_t allowed_mask = CPU_MASK_NONE;
 
 	daemonize("%s/%d", startup->name, cpu);
 	allow_signal(SIGCHLD);
@@ -176,7 +177,8 @@ static int worker_thread(void *__startup
 	cwq->thread = current;
 
 	set_user_nice(current, -10);
-	set_cpus_allowed(current, 1UL << cpu);
+	cpu_set(cpu, allowed_mask);
+	set_cpus_allowed(current, allowed_mask);
 
 	complete(&startup->done);
 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [cpumask_t 2/3] i386 changes for 2.5.67-bk6
  2003-04-15 22:50 [cpumask_t 1/3] core changes for 2.5.67-bk6 William Lee Irwin III
@ 2003-04-15 22:58 ` William Lee Irwin III
  2003-04-15 23:08   ` [cpumask_t 3/3] ia64 " William Lee Irwin III
  2003-04-18 17:20 ` [cpumask_t 1/3] core " Randy.Dunlap
  1 sibling, 1 reply; 5+ messages in thread
From: William Lee Irwin III @ 2003-04-15 22:58 UTC (permalink / raw)
  To: linux-kernel

On Tue, Apr 15, 2003 at 03:50:36PM -0700, William Lee Irwin III wrote:
> Core changes for extended cpu masks. Basically use a machine word

i386 changes for extended cpu masks. Basically force various things
that can possibly be used with NR_CPUS > BITS_PER_LONG in arch/i386
to pass typechecking and use the cpumask_t data type. For things that
simply can't be used with the larger systems, I use the cpus_coerce()
escape hatch to avoid things getting awkward.

Changes made only for flat logical tinySMP boxen and NUMA-Q; voyager
and "other" (not sure what "bigsmp" refers to) kind of got left out
since I don't really know my way around their code and I don't have
any systems of their kind to testboot on.


diff -urpN linux-2.5.67-bk6/arch/i386/kernel/apic.c cpu-2.5.67-bk6-1/arch/i386/kernel/apic.c
--- linux-2.5.67-bk6/arch/i386/kernel/apic.c	2003-04-07 10:33:02.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/apic.c	2003-04-15 14:39:36.000000000 -0700
@@ -1136,7 +1136,8 @@ int __init APIC_init_uniprocessor (void)
 
 	connect_bsp_APIC();
 
-	phys_cpu_present_map = 1 << boot_cpu_physical_apicid;
+	cpus_clear(phys_cpu_present_map);
+	cpu_set(boot_cpu_physical_apicid, phys_cpu_present_map);
 
 	setup_local_APIC();
 
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/cpu/proc.c cpu-2.5.67-bk6-1/arch/i386/kernel/cpu/proc.c
--- linux-2.5.67-bk6/arch/i386/kernel/cpu/proc.c	2003-04-07 10:32:29.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/cpu/proc.c	2003-04-15 14:39:36.000000000 -0700
@@ -60,7 +60,7 @@ static int show_cpuinfo(struct seq_file 
 	int fpu_exception;
 
 #ifdef CONFIG_SMP
-	if (!(cpu_online_map & (1<<n)))
+	if (!cpu_online(n))
 		return 0;
 #endif
 	seq_printf(m, "processor\t: %d\n"
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/io_apic.c cpu-2.5.67-bk6-1/arch/i386/kernel/io_apic.c
--- linux-2.5.67-bk6/arch/i386/kernel/io_apic.c	2003-04-15 14:37:51.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/io_apic.c	2003-04-15 14:39:36.000000000 -0700
@@ -240,22 +240,22 @@ static void clear_IO_APIC (void)
 			clear_IO_APIC_pin(apic, pin);
 }
 
-static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
+static void set_ioapic_affinity (unsigned int irq, unsigned long dest)
 {
-	unsigned long flags;
+	unsigned long flags, regval = dest;
 	int pin;
 	struct irq_pin_list *entry = irq_2_pin + irq;
 
 	/*
 	 * Only the first 8 bits are valid.
 	 */
-	mask = mask << 24;
+	regval <<= 24;
 	spin_lock_irqsave(&ioapic_lock, flags);
 	for (;;) {
 		pin = entry->pin;
 		if (pin == -1)
 			break;
-		io_apic_write(entry->apic, 0x10 + 1 + pin*2, mask);
+		io_apic_write(entry->apic, 0x10 + 1 + pin*2, regval);
 		if (!entry->next)
 			break;
 		entry = irq_2_pin + entry->next;
@@ -277,7 +277,7 @@ static void set_ioapic_affinity (unsigne
 #  define Dprintk(x...) 
 # endif
 
-extern unsigned long irq_affinity[NR_IRQS];
+extern cpumask_t irq_affinity[NR_IRQS];
 
 static int __cacheline_aligned pending_irq_balance_apicid[NR_IRQS];
 static int irqbalance_disabled = NO_BALANCE_IRQ;
@@ -296,8 +296,7 @@ struct irq_cpu_info {
 #define IDLE_ENOUGH(cpu,now) \
 		(idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1))
 
-#define IRQ_ALLOWED(cpu,allowed_mask) \
-		((1 << cpu) & (allowed_mask))
+#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
 
 #define CPU_TO_PACKAGEINDEX(i) \
 		((physical_balance && i > cpu_sibling_map[i]) ? cpu_sibling_map[i] : i)
@@ -309,7 +308,7 @@ struct irq_cpu_info {
 
 long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
 
-static unsigned long move(int curr_cpu, unsigned long allowed_mask,
+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
 			unsigned long now, int direction)
 {
 	int search_idle = 1;
@@ -339,13 +338,13 @@ inside:
 static inline void balance_irq(int cpu, int irq)
 {
 	unsigned long now = jiffies;
-	unsigned long allowed_mask;
+	cpumask_t allowed_mask;
 	unsigned int new_cpu;
 		
 	if (irqbalance_disabled)
 		return;
 
-	allowed_mask = cpu_online_map & irq_affinity[irq];
+	cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
 	new_cpu = move(cpu, allowed_mask, now, 1);
 	if (cpu != new_cpu) {
 		irq_desc_t *desc = irq_desc + irq;
@@ -388,8 +387,7 @@ static void do_irq_balance(void)
 	int tmp_loaded, first_attempt = 1;
 	unsigned long tmp_cpu_irq;
 	unsigned long imbalance = 0;
-	unsigned long allowed_mask;
-	unsigned long target_cpu_mask;
+	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		int package_index;
@@ -538,10 +536,12 @@ tryanotherirq:
 					CPU_IRQ(cpu_sibling_map[min_loaded]))
 		min_loaded = cpu_sibling_map[min_loaded];
 
-	allowed_mask = cpu_online_map & irq_affinity[selected_irq];
-	target_cpu_mask = 1 << min_loaded;
+	cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
+	cpus_clear(target_cpu_mask);
+	cpu_set(min_loaded, target_cpu_mask);
+	cpus_and(tmp, target_cpu_mask, allowed_mask);
 
-	if (target_cpu_mask & allowed_mask) {
+	if (!cpus_empty(tmp)) {
 		irq_desc_t *desc = irq_desc + selected_irq;
 		unsigned long flags;
 
@@ -601,12 +601,14 @@ static int __init balanced_irq_init(void
 {
 	int i;
 	struct cpuinfo_x86 *c;
+	cpumask_t tmp;
 
-        c = &boot_cpu_data;
+	cpus_shift_right(tmp, cpu_online_map, 2);
+	c = &boot_cpu_data;
 	if (irqbalance_disabled)
 		return 0;
 	
-	 /* disable irqbalance completely if there is only one processor online */
+	/* disable irqbalance completely if there is only one processor online */
 	if (num_online_cpus() < 2) {
 		irqbalance_disabled = 1;
 		return 0;
@@ -615,7 +617,7 @@ static int __init balanced_irq_init(void
 	 * Enable physical balance only if more than 1 physical processor
 	 * is present
 	 */
-	if (smp_num_siblings > 1 && cpu_online_map >> 2)
+	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
 	for (i = 0; i < NR_CPUS; i++) {
@@ -1570,6 +1572,10 @@ static void __init setup_ioapic_ids_from
 		/* This gets done during IOAPIC enumeration for ACPI. */
 		return;
 
+	/*
+	 * This is broken; anything with a real cpu count has to
+	 * circumvent this idiocy regardless.
+	 */
 	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
 
 	/*
@@ -1597,8 +1603,9 @@ static void __init setup_ioapic_ids_from
 		 * system must have a unique ID or we get lots of nice
 		 * 'stuck on smp_invalidate_needed IPI wait' messages.
 		 */
-		if (check_apicid_used(phys_id_present_map,
-					mp_ioapics[apic].mpc_apicid)) {
+
+		/* MAJOR BRAINDAMAGE */
+		if (phys_id_present_map & (1UL << mp_ioapics[apic].mpc_apicid)) {
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
 				apic, mp_ioapics[apic].mpc_apicid);
 			for (i = 0; i < 0xf; i++)
@@ -2197,7 +2204,7 @@ int __init io_apic_get_unique_id (int io
 	 */
 
 	if (!apic_id_map)
-		apic_id_map = phys_cpu_present_map;
+		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
 
 	spin_lock_irqsave(&ioapic_lock, flags);
 	*(int *)&reg_00 = io_apic_read(ioapic, 0);
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/irq.c cpu-2.5.67-bk6-1/arch/i386/kernel/irq.c
--- linux-2.5.67-bk6/arch/i386/kernel/irq.c	2003-04-07 10:30:39.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/irq.c	2003-04-15 14:39:36.000000000 -0700
@@ -42,9 +42,10 @@
 #include <asm/pgalloc.h>
 #include <asm/delay.h>
 #include <asm/desc.h>
+#include <asm/mpspec.h>
 #include <asm/irq.h>
 
-
+#include "mach_apic.h"
 
 /*
  * Linux has a controller-independent x86 interrupt architecture.
@@ -799,13 +800,13 @@ int setup_irq(unsigned int irq, struct i
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
 static unsigned int parse_hex_value (const char *buffer,
-		unsigned long count, unsigned long *ret)
+		unsigned long count, cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -819,10 +820,10 @@ static unsigned int parse_hex_value (con
 	 * Parse the first 8 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
 
 	for (i = 0; i < count; i++) {
 		unsigned int c = hexnum[i];
+		int k;
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -831,7 +832,10 @@ static unsigned int parse_hex_value (con
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		cpus_shift_left(value, value, 4);
+		for (k = 0; k < 4; ++k)
+			if (test_bit(k, (unsigned long *)&c))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -842,20 +846,31 @@ out:
 
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
-unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int k, len;
+	cpumask_t tmp = irq_affinity[(long)data];
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+
+	len = 0;
+	for (k = 0; k < sizeof(cpumask_t)/sizeof(unsigned long); ++k) {
+		int j = sprintf (page, "%08lx\n", cpus_coerce(tmp));
+		cpus_shift_right(tmp, tmp, BITS_PER_LONG);
+		len  += j;
+		page += j;
+	}
+	return len;
 }
 
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
 	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 
 	if (!irq_desc[irq].handler->set_affinity)
 		return -EIO;
@@ -867,11 +882,12 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
 	irq_affinity[irq] = new_value;
-	irq_desc[irq].handler->set_affinity(irq, new_value);
+	irq_desc[irq].handler->set_affinity(irq, cpu_to_logical_apicid(first_cpu(new_value)));
 
 	return full_count;
 }
@@ -890,8 +906,9 @@ static int prof_cpu_mask_read_proc (char
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data, full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data;
+	unsigned long full_count = count, err;
+	cpumask_t new_value;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/ldt.c cpu-2.5.67-bk6-1/arch/i386/kernel/ldt.c
--- linux-2.5.67-bk6/arch/i386/kernel/ldt.c	2003-04-07 10:32:15.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/ldt.c	2003-04-15 14:39:36.000000000 -0700
@@ -56,9 +56,11 @@ static int alloc_ldt(mm_context_t *pc, i
 
 	if (reload) {
 #ifdef CONFIG_SMP
+		cpumask_t tmp = CPU_MASK_NONE;
 		preempt_disable();
+		cpu_set(smp_processor_id(), tmp);
 		load_LDT(pc);
-		if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
+		if (!cpu_isset(smp_processor_id(), current->mm->cpu_vm_mask))
 			smp_call_function(flush_ldt, 0, 1, 1);
 		preempt_enable();
 #else
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/mpparse.c cpu-2.5.67-bk6-1/arch/i386/kernel/mpparse.c
--- linux-2.5.67-bk6/arch/i386/kernel/mpparse.c	2003-04-07 10:31:00.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/mpparse.c	2003-04-15 14:39:36.000000000 -0700
@@ -70,7 +70,7 @@ unsigned int boot_cpu_logical_apicid = -
 static unsigned int __initdata num_processors;
 
 /* Bitmask of physically existing CPUs */
-unsigned long phys_cpu_present_map;
+cpumask_t phys_cpu_present_map;
 
 int x86_summit = 0;
 u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
@@ -106,6 +106,7 @@ static struct mpc_config_translation *tr
 void __init MP_processor_info (struct mpc_config_processor *m)
 {
  	int ver, apicid;
+	cpumask_t tmp;
  	
 	if (!(m->mpc_cpuflag & CPU_ENABLED))
 		return;
@@ -176,7 +177,8 @@ void __init MP_processor_info (struct mp
 	}
 	ver = m->mpc_apicver;
 
-	phys_cpu_present_map |= apicid_to_cpu_present(apicid);
+	tmp = apicid_to_cpu_present(apicid);
+	cpus_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
 	
 	/*
 	 * Validate version
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/reboot.c cpu-2.5.67-bk6-1/arch/i386/kernel/reboot.c
--- linux-2.5.67-bk6/arch/i386/kernel/reboot.c	2003-04-07 10:30:59.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/reboot.c	2003-04-15 14:39:36.000000000 -0700
@@ -226,7 +226,7 @@ void machine_restart(char * __unused)
 		   if its not, default to the BSP */
 		if ((reboot_cpu == -1) ||  
 		      (reboot_cpu > (NR_CPUS -1))  || 
-		      !(phys_cpu_present_map & (1<<cpuid))) 
+		      !(cpu_isset(cpuid, phys_cpu_present_map)))
 			reboot_cpu = boot_cpu_physical_apicid;
 
 		reboot_smp = 0;  /* use this as a flag to only go through this once*/
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/smp.c cpu-2.5.67-bk6-1/arch/i386/kernel/smp.c
--- linux-2.5.67-bk6/arch/i386/kernel/smp.c	2003-04-07 10:30:40.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/smp.c	2003-04-15 14:39:36.000000000 -0700
@@ -155,10 +155,14 @@ void send_IPI_self(int vector)
 	__send_IPI_shortcut(APIC_DEST_SELF, vector);
 }
 
-static inline void send_IPI_mask_bitmask(int mask, int vector)
+/*
+ * This is only used on smaller machines.
+ */
+static inline void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
 {
 	unsigned long cfg;
 	unsigned long flags;
+	unsigned long mask = cpus_coerce(cpumask);
 
 	local_irq_save(flags);
 		
@@ -186,10 +190,10 @@ static inline void send_IPI_mask_bitmask
 	local_irq_restore(flags);
 }
 
-static inline void send_IPI_mask_sequence(int mask, int vector)
+static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 {
 	unsigned long cfg, flags;
-	unsigned int query_cpu, query_mask;
+	unsigned int query_cpu;
 
 	/*
 	 * Hack. The clustered APIC addressing mode doesn't allow us to send 
@@ -200,8 +204,7 @@ static inline void send_IPI_mask_sequenc
 	local_irq_save(flags);
 
 	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
-		query_mask = 1 << query_cpu;
-		if (query_mask & mask) {
+		if (cpu_isset(query_cpu, mask)) {
 		
 			/*
 			 * Wait for idle.
@@ -238,7 +241,7 @@ static inline void send_IPI_mask_sequenc
  *	Optimizations Manfred Spraul <manfred@colorfullife.com>
  */
 
-static volatile unsigned long flush_cpumask;
+static volatile cpumask_t flush_cpumask;
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
 static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
@@ -255,7 +258,7 @@ static inline void leave_mm (unsigned lo
 {
 	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 		BUG();
-	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+	cpu_clear(cpu, cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 
@@ -311,7 +314,7 @@ asmlinkage void smp_invalidate_interrupt
 
 	cpu = get_cpu();
 
-	if (!test_bit(cpu, &flush_cpumask))
+	if (!cpu_isset(cpu, flush_cpumask))
 		goto out;
 		/* 
 		 * This was a BUG() but until someone can quote me the
@@ -332,15 +335,17 @@ asmlinkage void smp_invalidate_interrupt
 			leave_mm(cpu);
 	}
 	ack_APIC_irq();
-	clear_bit(cpu, &flush_cpumask);
-
+	smp_mb__before_clear_bit();
+	cpu_clear(cpu, flush_cpumask);
+	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
 }
 
-static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 						unsigned long va)
 {
+	cpumask_t tmp;
 	/*
 	 * A couple of (to be removed) sanity checks:
 	 *
@@ -348,14 +353,12 @@ static void flush_tlb_others (unsigned l
 	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	if (!cpumask)
-		BUG();
-	if ((cpumask & cpu_online_map) != cpumask)
-		BUG();
-	if (cpumask & (1 << smp_processor_id()))
-		BUG();
-	if (!mm)
-		BUG();
+	BUG_ON(cpus_empty(cpumask));
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+	BUG_ON(!cpus_equal(cpumask, tmp));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(!mm);
 
 	/*
 	 * i'm not happy about this global shared spinlock in the
@@ -367,15 +370,21 @@ static void flush_tlb_others (unsigned l
 	
 	flush_mm = mm;
 	flush_va = va;
-	atomic_set_mask(cpumask, &flush_cpumask);
+	/*
+	 * Probably introduced a bug here. This was:
+	 * atomic_set_mask(cpumask, &flush_cpumask);
+	 */
+	flush_cpumask = cpumask;
+	mb();
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
 	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
 
-	while (flush_cpumask)
-		/* nothing. lockup detection does not belong here */;
+	while (!cpus_empty(flush_cpumask))
+		/* nothing. lockup detection does not belong here */
+		mb();
 
 	flush_mm = NULL;
 	flush_va = 0;
@@ -385,23 +394,25 @@ static void flush_tlb_others (unsigned l
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long cpu_mask;
+	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (cpu_mask)
+	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
-	unsigned long cpu_mask;
+	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -409,7 +420,7 @@ void flush_tlb_mm (struct mm_struct * mm
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpu_mask)
+	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -418,10 +429,11 @@ void flush_tlb_mm (struct mm_struct * mm
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long cpu_mask;
+	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if(current->mm)
@@ -430,7 +442,7 @@ void flush_tlb_page(struct vm_area_struc
 		 	leave_mm(smp_processor_id());
 	}
 
-	if (cpu_mask)
+	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
@@ -457,7 +469,9 @@ void flush_tlb_all(void)
  */
 void smp_send_reschedule(int cpu)
 {
-	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
+	cpumask_t cpumask = CPU_MASK_NONE;
+	cpu_set(cpu, cpumask);
+	send_IPI_mask(cpumask, RESCHEDULE_VECTOR);
 }
 
 /*
@@ -544,7 +558,7 @@ static void stop_this_cpu (void * dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	disable_local_APIC();
 	if (cpu_data[smp_processor_id()].hlt_works_ok)
diff -urpN linux-2.5.67-bk6/arch/i386/kernel/smpboot.c cpu-2.5.67-bk6-1/arch/i386/kernel/smpboot.c
--- linux-2.5.67-bk6/arch/i386/kernel/smpboot.c	2003-04-15 14:37:51.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/i386/kernel/smpboot.c	2003-04-15 14:39:36.000000000 -0700
@@ -62,11 +62,11 @@ int smp_num_siblings = 1;
 int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
 
 /* Bitmask of currently online CPUs */
-unsigned long cpu_online_map;
+cpumask_t cpu_online_map;
 
-static volatile unsigned long cpu_callin_map;
-volatile unsigned long cpu_callout_map;
-static unsigned long smp_commenced_mask;
+static volatile cpumask_t cpu_callin_map;
+volatile cpumask_t cpu_callout_map;
+static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -268,7 +268,7 @@ static void __init synchronize_tsc_bp (v
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(i, &cpu_callout_map)) {
+		if (cpu_isset(i, cpu_callout_map)) {
 			t0 = tsc_values[i];
 			sum += t0;
 		}
@@ -277,7 +277,7 @@ static void __init synchronize_tsc_bp (v
 
 	sum = 0;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (!test_bit(i, &cpu_callout_map))
+		if (!cpu_isset(i, cpu_callout_map))
 			continue;
 		delta = tsc_values[i] - avg;
 		if (delta < 0)
@@ -353,7 +353,7 @@ void __init smp_callin(void)
 	 */
 	phys_id = GET_APIC_ID(apic_read(APIC_ID));
 	cpuid = smp_processor_id();
-	if (test_bit(cpuid, &cpu_callin_map)) {
+	if (cpu_isset(cpuid, cpu_callin_map)) {
 		printk("huh, phys CPU#%d, CPU#%d already present??\n",
 					phys_id, cpuid);
 		BUG();
@@ -376,7 +376,7 @@ void __init smp_callin(void)
 		/*
 		 * Has the boot CPU finished it's STARTUP sequence?
 		 */
-		if (test_bit(cpuid, &cpu_callout_map))
+		if (cpu_isset(cpuid, cpu_callout_map))
 			break;
 		rep_nop();
 	}
@@ -417,7 +417,7 @@ void __init smp_callin(void)
 	/*
 	 * Allow the master to continue.
 	 */
-	set_bit(cpuid, &cpu_callin_map);
+	cpu_set(cpuid, cpu_callin_map);
 
 	/*
 	 *      Synchronize the TSC with the BP
@@ -442,7 +442,7 @@ int __init start_secondary(void *unused)
 	 */
 	cpu_init();
 	smp_callin();
-	while (!test_bit(smp_processor_id(), &smp_commenced_mask))
+	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
 		rep_nop();
 	setup_secondary_APIC_clock();
 	if (nmi_watchdog == NMI_IO_APIC) {
@@ -456,7 +456,7 @@ int __init start_secondary(void *unused)
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
-	set_bit(smp_processor_id(), &cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
 	wmb();
 	return cpu_idle();
 }
@@ -499,8 +499,8 @@ static struct task_struct * __init fork_
 #ifdef CONFIG_NUMA
 
 /* which logical CPUs are on which nodes */
-volatile unsigned long node_2_cpu_mask[MAX_NR_NODES] = 
-						{ [0 ... MAX_NR_NODES-1] = 0 };
+volatile cpumask_t node_2_cpu_mask[MAX_NR_NODES] = 
+				{ [0 ... MAX_NR_NODES-1] = CPU_MASK_NONE };
 /* which node each logical CPU is on */
 volatile int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
 
@@ -508,7 +508,7 @@ volatile int cpu_2_node[NR_CPUS] = { [0 
 static inline void map_cpu_to_node(int cpu, int node)
 {
 	printk("Mapping cpu %d to node %d\n", cpu, node);
-	node_2_cpu_mask[node] |= (1 << cpu);
+	cpu_set(cpu, node_2_cpu_mask[node]);
 	cpu_2_node[cpu] = node;
 }
 
@@ -519,7 +519,7 @@ static inline void unmap_cpu_to_node(int
 
 	printk("Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NR_NODES; node ++)
-		node_2_cpu_mask[node] &= ~(1 << cpu);
+		cpu_clear(cpu, node_2_cpu_mask[node]);
 	cpu_2_node[cpu] = -1;
 }
 #else /* !CONFIG_NUMA */
@@ -770,7 +770,7 @@ wakeup_secondary_cpu(int phys_apicid, un
 }
 #endif	/* WAKE_SECONDARY_VIA_INIT */
 
-extern unsigned long cpu_initialized;
+extern cpumask_t cpu_initialized;
 
 static int __init do_boot_cpu(int apicid)
 /*
@@ -835,19 +835,19 @@ static int __init do_boot_cpu(int apicid
 		 * allow APs to start initializing.
 		 */
 		Dprintk("Before Callout %d.\n", cpu);
-		set_bit(cpu, &cpu_callout_map);
+		cpu_set(cpu, cpu_callout_map);
 		Dprintk("After Callout %d.\n", cpu);
 
 		/*
 		 * Wait 5s total for a response
 		 */
 		for (timeout = 0; timeout < 50000; timeout++) {
-			if (test_bit(cpu, &cpu_callin_map))
+			if (cpu_isset(cpu, cpu_callin_map))
 				break;	/* It has booted */
 			udelay(100);
 		}
 
-		if (test_bit(cpu, &cpu_callin_map)) {
+		if (cpu_isset(cpu, cpu_callin_map)) {
 			/* number CPUs logically, starting from 1 (BSP is 0) */
 			Dprintk("OK.\n");
 			printk("CPU%d: ", cpu);
@@ -868,8 +868,8 @@ static int __init do_boot_cpu(int apicid
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		unmap_cpu_to_logical_apicid(cpu);
-		clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
-		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
+		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
 		cpucount--;
 	}
 
@@ -956,7 +956,8 @@ static void __init smp_boot_cpus(unsigne
 	if (!smp_found_config) {
 		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = 1;
+		cpus_clear(phys_cpu_present_map);
+		cpu_set(0, phys_cpu_present_map);
 		if (APIC_init_uniprocessor())
 			printk(KERN_NOTICE "Local APIC not detected."
 					   " Using dummy APIC emulation.\n");
@@ -972,7 +973,7 @@ static void __init smp_boot_cpus(unsigne
 	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
 		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
 				boot_cpu_physical_apicid);
-		phys_cpu_present_map |= (1 << hard_smp_processor_id());
+		cpu_set(hard_smp_processor_id(), phys_cpu_present_map);
 	}
 
 	/*
@@ -983,7 +984,8 @@ static void __init smp_boot_cpus(unsigne
 			boot_cpu_physical_apicid);
 		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
 		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = 1;
+		cpus_clear(phys_cpu_present_map);
+		cpu_set(0, phys_cpu_present_map);
 		return;
 	}
 
@@ -996,7 +998,7 @@ static void __init smp_boot_cpus(unsigne
 		smp_found_config = 0;
 		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
 		smpboot_clear_io_apic_irqs();
-		phys_cpu_present_map = 1;
+		cpus_clear(phys_cpu_present_map);
 		return;
 	}
 
@@ -1051,7 +1053,7 @@ static void __init smp_boot_cpus(unsigne
 	} else {
 		unsigned long bogosum = 0;
 		for (cpu = 0; cpu < NR_CPUS; cpu++)
-			if (cpu_callout_map & (1<<cpu))
+			if (cpu_isset(cpu, cpu_callout_map))
 				bogosum += cpu_data[cpu].loops_per_jiffy;
 		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 			cpucount+1,
@@ -1083,10 +1085,10 @@ static void __init smp_boot_cpus(unsigne
 		
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			int 	i;
-			if (!test_bit(cpu, &cpu_callout_map)) continue;
+			if (!cpu_isset(cpu, cpu_callout_map)) continue;
 
 			for (i = 0; i < NR_CPUS; i++) {
-				if (i == cpu || !test_bit(i, &cpu_callout_map))
+				if (i == cpu || !cpu_isset(i, cpu_callout_map))
 					continue;
 				if (phys_proc_id[cpu] == phys_proc_id[i]) {
 					cpu_sibling_map[cpu] = i;
@@ -1121,28 +1123,28 @@ void __init smp_prepare_cpus(unsigned in
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_callout_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_callout_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86.  See "rewrite" above. */
-	if (test_bit(cpu, &smp_commenced_mask)) {
+	if (cpu_isset(cpu, smp_commenced_mask)) {
 		local_irq_enable();
 		return -ENOSYS;
 	}
 
 	/* In case one didn't come up */
-	if (!test_bit(cpu, &cpu_callin_map)) {
+	if (!cpu_isset(cpu, cpu_callin_map)) {
 		local_irq_enable();
 		return -EIO;
 	}
 
 	local_irq_enable();
 	/* Unleash the CPU! */
-	set_bit(cpu, &smp_commenced_mask);
-	while (!test_bit(cpu, &cpu_online_map))
+	cpu_set(cpu, smp_commenced_mask);
+	while (!cpu_isset(cpu, cpu_online_map))
 		mb();
 	return 0;
 }
diff -urpN linux-2.5.67-bk6/include/asm-i386/highmem.h cpu-2.5.67-bk6-1/include/asm-i386/highmem.h
--- linux-2.5.67-bk6/include/asm-i386/highmem.h	2003-04-07 10:30:41.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/highmem.h	2003-04-15 14:39:40.000000000 -0700
@@ -22,6 +22,7 @@
 
 #include <linux/config.h>
 #include <linux/interrupt.h>
+#include <linux/threads.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 
@@ -39,7 +40,12 @@ extern void kmap_init(void);
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#if NR_CPUS <= 32
 #define PKMAP_BASE (0xff800000UL)
+#else
+#define PKMAP_BASE (0xff600000UL)
+#endif
+
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
diff -urpN linux-2.5.67-bk6/include/asm-i386/mach-default/mach_apic.h cpu-2.5.67-bk6-1/include/asm-i386/mach-default/mach_apic.h
--- linux-2.5.67-bk6/include/asm-i386/mach-default/mach_apic.h	2003-04-07 10:33:02.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/mach-default/mach_apic.h	2003-04-15 14:39:40.000000000 -0700
@@ -3,8 +3,12 @@
 
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)
 
+/*
+ * Flat mode can't support large numbers of cpus.
+ * The first word of cpu_online_map should cover us.
+ */
 #ifdef CONFIG_SMP
- #define TARGET_CPUS (cpu_online_map)
+ #define TARGET_CPUS cpus_coerce(cpu_online_map)
 #else
  #define TARGET_CPUS 0x01
 #endif
@@ -17,12 +21,13 @@
 
 #define APIC_BROADCAST_ID      0x0F
 #define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
-#define check_apicid_present(bit) (phys_cpu_present_map & (1 << bit))
+#define check_apicid_present(bit) cpu_isset(bit, phys_cpu_present_map)
 
+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_UP_APIC)
 static inline int apic_id_registered(void)
 {
-	return (test_bit(GET_APIC_ID(apic_read(APIC_ID)), 
-						&phys_cpu_present_map));
+	return cpu_isset(GET_APIC_ID(apic_read(APIC_ID)),
+				phys_cpu_present_map);
 }
 
 /*
@@ -41,10 +46,14 @@ static inline void init_apic_ldr(void)
 	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
 	apic_write_around(APIC_LDR, val);
 }
+#endif
 
-static inline ulong ioapic_phys_id_map(ulong phys_map)
+/*
+ * Only small machines use this APIC mode.
+ */
+static inline unsigned long ioapic_phys_id_map(cpumask_t phys_map)
 {
-	return phys_map;
+	return cpus_coerce(phys_map);
 }
 
 static inline void clustered_apic_check(void)
@@ -74,9 +83,12 @@ static inline int cpu_present_to_apicid(
 	return  mps_cpu;
 }
 
-static inline unsigned long apicid_to_cpu_present(int phys_apicid)
+static inline cpumask_t apicid_to_cpu_present(int phys_apicid)
 {
-	return (1ul << phys_apicid);
+	cpumask_t mask;
+	cpus_clear(mask);
+	cpu_set(phys_apicid, mask);
+	return mask;
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, 
@@ -96,7 +108,7 @@ static inline void setup_portio_remap(vo
 
 static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 {
-	return test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map);
+	return cpu_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
 }
 
 #endif /* __ASM_MACH_APIC_H */
diff -urpN linux-2.5.67-bk6/include/asm-i386/mach-default/mach_ipi.h cpu-2.5.67-bk6-1/include/asm-i386/mach-default/mach_ipi.h
--- linux-2.5.67-bk6/include/asm-i386/mach-default/mach_ipi.h	2003-04-07 10:32:30.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/mach-default/mach_ipi.h	2003-04-15 14:39:40.000000000 -0700
@@ -1,10 +1,10 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-static inline void send_IPI_mask_bitmask(int mask, int vector);
+static inline void send_IPI_mask_bitmask(cpumask_t cpumask, int vector);
 static inline void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
diff -urpN linux-2.5.67-bk6/include/asm-i386/mach-numaq/mach_apic.h cpu-2.5.67-bk6-1/include/asm-i386/mach-numaq/mach_apic.h
--- linux-2.5.67-bk6/include/asm-i386/mach-numaq/mach_apic.h	2003-04-07 10:31:08.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/mach-numaq/mach_apic.h	2003-04-15 14:39:40.000000000 -0700
@@ -12,12 +12,12 @@
 #define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */
  
 #define APIC_BROADCAST_ID      0x0F
-#define check_apicid_used(bitmap, apicid) ((bitmap) & (1 << (apicid)))
-#define check_apicid_present(bit) (phys_cpu_present_map & (1 << bit))
+#define check_apicid_used(bitmap, apicid) cpu_isset(apicid, bitmap)
+#define check_apicid_present(bit)	cpu_isset(bit, phys_cpu_present_map)
 
 static inline int apic_id_registered(void)
 {
-	return (1);
+	return 1;
 }
 
 static inline void init_apic_ldr(void)
@@ -40,10 +40,10 @@ static inline int multi_timer_check(int 
 	return (apic != 0 && irq == 0);
 }
 
-static inline ulong ioapic_phys_id_map(ulong phys_map)
+static inline unsigned long ioapic_phys_id_map(cpumask_t phys_map)
 {
 	/* We don't have a good way to do this yet - hack */
-	return 0xf;
+	return 0xFUL;
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -68,9 +68,14 @@ static inline int apicid_to_node(int log
 	return (logical_apicid >> 4);
 }
 
-static inline unsigned long apicid_to_cpu_present(int logical_apicid)
+static inline cpumask_t apicid_to_cpu_present(int logical_apicid)
 {
-	return ( (logical_apicid&0xf) << (4*apicid_to_node(logical_apicid)) );
+	cpumask_t mask = CPU_MASK_NONE;
+	int node = apicid_to_node(logical_apicid);
+	int cpu = __ffs(logical_apicid & 0xf);
+
+	cpu_set(cpu + 4*node, mask);
+	return mask;
 }
 
 static inline int mpc_apic_id(struct mpc_config_processor *m, 
diff -urpN linux-2.5.67-bk6/include/asm-i386/mach-numaq/mach_ipi.h cpu-2.5.67-bk6-1/include/asm-i386/mach-numaq/mach_ipi.h
--- linux-2.5.67-bk6/include/asm-i386/mach-numaq/mach_ipi.h	2003-04-07 10:32:17.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/mach-numaq/mach_ipi.h	2003-04-15 14:39:40.000000000 -0700
@@ -1,18 +1,19 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-static inline void send_IPI_mask_sequence(int mask, int vector);
+static inline void send_IPI_mask_sequence(cpumask_t, int vector);
 
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask(cpumask_t mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	unsigned long mask = cpu_online_map & ~(1 << smp_processor_id());
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
 
-	if (mask)
+	if (!cpus_empty(mask))
 		send_IPI_mask(mask, vector);
 }
 
diff -urpN linux-2.5.67-bk6/include/asm-i386/mmu_context.h cpu-2.5.67-bk6-1/include/asm-i386/mmu_context.h
--- linux-2.5.67-bk6/include/asm-i386/mmu_context.h	2003-04-07 10:30:33.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/mmu_context.h	2003-04-15 14:39:40.000000000 -0700
@@ -26,12 +26,12 @@ static inline void switch_mm(struct mm_s
 {
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		clear_bit(cpu, &prev->cpu_vm_mask);
+		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		cpu_tlbstate[cpu].state = TLBSTATE_OK;
 		cpu_tlbstate[cpu].active_mm = next;
 #endif
-		set_bit(cpu, &next->cpu_vm_mask);
+		cpu_set(cpu, next->cpu_vm_mask);
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -45,9 +45,8 @@ static inline void switch_mm(struct mm_s
 #ifdef CONFIG_SMP
 	else {
 		cpu_tlbstate[cpu].state = TLBSTATE_OK;
-		if (cpu_tlbstate[cpu].active_mm != next)
-			BUG();
-		if (!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+		BUG_ON(cpu_tlbstate[cpu].active_mm != next);
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled 
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
diff -urpN linux-2.5.67-bk6/include/asm-i386/mpspec.h cpu-2.5.67-bk6-1/include/asm-i386/mpspec.h
--- linux-2.5.67-bk6/include/asm-i386/mpspec.h	2003-04-07 10:30:33.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/mpspec.h	2003-04-15 14:39:40.000000000 -0700
@@ -1,6 +1,8 @@
 #ifndef __ASM_MPSPEC_H
 #define __ASM_MPSPEC_H
 
+#include <linux/cpumask.h>
+
 /*
  * Structure definitions for SMP machines following the
  * Intel Multiprocessing Specification 1.1 and 1.4.
@@ -206,7 +208,7 @@ extern int quad_local_to_mp_bus_id [NR_C
 extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
 
 extern unsigned int boot_cpu_physical_apicid;
-extern unsigned long phys_cpu_present_map;
+extern cpumask_t phys_cpu_present_map;
 extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
diff -urpN linux-2.5.67-bk6/include/asm-i386/numaq.h cpu-2.5.67-bk6-1/include/asm-i386/numaq.h
--- linux-2.5.67-bk6/include/asm-i386/numaq.h	2003-04-07 10:31:42.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/numaq.h	2003-04-15 14:39:40.000000000 -0700
@@ -28,7 +28,7 @@
 
 #ifdef CONFIG_X86_NUMAQ
 
-#define MAX_NUMNODES		8
+#define MAX_NUMNODES		16
 extern void get_memcfg_numaq(void);
 #define get_memcfg_numa() get_memcfg_numaq()
 
@@ -159,7 +159,7 @@ struct sys_cfg_data {
 
 static inline unsigned long *get_zholes_size(int nid)
 {
-	return 0;
+	return NULL;
 }
 #endif /* CONFIG_X86_NUMAQ */
 #endif /* NUMAQ_H */
diff -urpN linux-2.5.67-bk6/include/asm-i386/smp.h cpu-2.5.67-bk6-1/include/asm-i386/smp.h
--- linux-2.5.67-bk6/include/asm-i386/smp.h	2003-04-07 10:31:47.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/smp.h	2003-04-15 14:39:40.000000000 -0700
@@ -8,6 +8,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -31,8 +32,8 @@
  */
  
 extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
+extern cpumask_t phys_cpu_present_map;
+extern cpumask_t cpu_online_map;
 extern volatile unsigned long smp_invalidate_needed;
 extern int pic_mode;
 extern int smp_num_siblings;
@@ -55,37 +56,19 @@ extern void zap_low_mappings (void);
  */
 #define smp_processor_id() (current_thread_info()->cpu)
 
-extern volatile unsigned long cpu_callout_map;
+extern volatile cpumask_t cpu_callout_map;
 
-#define cpu_possible(cpu) (cpu_callout_map & (1<<(cpu)))
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-
-#define for_each_cpu(cpu, mask) \
-	for(mask = cpu_online_map; \
-	    cpu = __ffs(mask), mask != 0; \
-	    mask &= ~(1<<cpu))
-
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map)
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {
-	return hweight32(cpu_callout_map);
+	return cpus_weight(cpu_callout_map);
 }
 
 extern void map_cpu_to_logical_apicid(void);
 extern void unmap_cpu_to_logical_apicid(int cpu);
 
-extern inline int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return -1;
-}
 #ifdef CONFIG_X86_LOCAL_APIC
 static __inline int hard_smp_processor_id(void)
 {
diff -urpN linux-2.5.67-bk6/include/asm-i386/topology.h cpu-2.5.67-bk6-1/include/asm-i386/topology.h
--- linux-2.5.67-bk6/include/asm-i386/topology.h	2003-04-07 10:30:44.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-i386/topology.h	2003-04-15 14:39:40.000000000 -0700
@@ -31,8 +31,10 @@
 
 #include <asm/mpspec.h>
 
+#include <linux/cpumask.h>
+
 /* Mappings between logical cpu number and node number */
-extern volatile unsigned long node_2_cpu_mask[];
+extern volatile cpumask_t node_2_cpu_mask[];
 extern volatile int cpu_2_node[];
 
 /* Returns the number of the node containing CPU 'cpu' */
@@ -49,7 +51,7 @@ static inline int cpu_to_node(int cpu)
 #define parent_node(node) (node)
 
 /* Returns a bitmask of CPUs on Node 'node'. */
-static inline unsigned long node_to_cpumask(int node)
+static inline cpumask_t node_to_cpumask(int node)
 {
 	return node_2_cpu_mask[node];
 }
@@ -57,14 +59,15 @@ static inline unsigned long node_to_cpum
 /* Returns the number of the first CPU on Node 'node'. */
 static inline int node_to_first_cpu(int node)
 { 
-	return __ffs(node_to_cpumask(node));
+	cpumask_t mask = node_to_cpumask(node);
+	return first_cpu(mask);
 }
 
 /* Returns the number of the first MemBlk on Node 'node' */
 #define node_to_memblk(node) (node)
 
 /* Returns the number of the node containing PCI bus 'bus' */
-static inline unsigned long pcibus_to_cpumask(int bus)
+static inline cpumask_t pcibus_to_cpumask(int bus)
 {
 	return node_to_cpumask(mp_bus_id_to_node[bus]);
 }

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [cpumask_t 3/3] ia64 changes for 2.5.67-bk6
  2003-04-15 22:58 ` [cpumask_t 2/3] i386 " William Lee Irwin III
@ 2003-04-15 23:08   ` William Lee Irwin III
  0 siblings, 0 replies; 5+ messages in thread
From: William Lee Irwin III @ 2003-04-15 23:08 UTC (permalink / raw)
  To: linux-kernel

On Tue, Apr 15, 2003 at 03:58:43PM -0700, William Lee Irwin III wrote:
> i386 changes for extended cpu masks. Basically force various things

ia64 changes for extended cpu masks. Written by Martin Hicks, with
some porting between 2.5.67 virgin and 2.5.67-bk6 by me (i.e. blame me
for mistakes in perfmon.c and palinfo.c, the rest is his fault =).

The same principles as i386 hold, though without quite as much cruft
to work around since it's a 64-bit arch and the interrupt controller
wasn't lobotomized at birth (or so it seems at first glance...).


diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/iosapic.c cpu-2.5.67-bk6-1/arch/ia64/kernel/iosapic.c
--- linux-2.5.67-bk6/arch/ia64/kernel/iosapic.c	2003-04-07 10:31:18.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/iosapic.c	2003-04-15 14:39:36.000000000 -0700
@@ -274,7 +274,7 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, unsigned long mask)
+iosapic_set_affinity (unsigned int irq, unsigned long arg)
 {
 #ifdef CONFIG_SMP
 	unsigned long flags;
@@ -283,16 +283,19 @@ iosapic_set_affinity (unsigned int irq, 
 	char *addr;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	ia64_vector vec;
+	cpumask_t tmp, mask = *(cpumask_t *)arg;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 	vec = irq_to_vector(irq);
 
-	mask &= cpu_online_map;
+	cpus_and(mask, mask, cpu_online_map);
 
-	if (!mask || vec >= IA64_NUM_VECTORS)
+	if (cpus_empty(mask) || vec >= IA64_NUM_VECTORS)
 		return;
 
-	dest = cpu_physical_id(ffz(~mask));
+	tmp = mask;
+	/* ffz(~mask) is the first *set* bit of mask; no complement needed */
+	dest = cpu_physical_id(first_cpu(tmp));
 
 	rte_index = iosapic_intr_info[vec].rte_index;
 	addr = iosapic_intr_info[vec].addr;
diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/irq.c cpu-2.5.67-bk6-1/arch/ia64/kernel/irq.c
--- linux-2.5.67-bk6/arch/ia64/kernel/irq.c	2003-04-07 10:32:28.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/irq.c	2003-04-15 14:39:36.000000000 -0700
@@ -806,12 +806,13 @@ int setup_irq(unsigned int irq, struct i
 static struct proc_dir_entry * root_irq_dir;
 static struct proc_dir_entry * irq_dir [NR_IRQS];
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS (2*sizeof(cpumask_t))
 
-static int parse_hex_value (const char *buffer, unsigned long count, unsigned long *ret)
+static int parse_hex_value (const char *buffer, unsigned long count, 
+			    cpumask_t *ret)
 {
 	unsigned char hexnum [HEX_DIGITS];
-	unsigned long value;
+	cpumask_t value = CPU_MASK_NONE;
 	int i;
 
 	if (!count)
@@ -825,10 +826,9 @@ static int parse_hex_value (const char *
 	 * Parse the first 8 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
-	value = 0;
-
 	for (i = 0; i < count; i++) {
 		unsigned int c = hexnum[i];
+		int k;
 
 		switch (c) {
 			case '0' ... '9': c -= '0'; break;
@@ -837,7 +837,10 @@ static int parse_hex_value (const char *
 		default:
 			goto out;
 		}
-		value = (value << 4) | c;
+		bitmap_shift_left(&cpus_coerce(value), &cpus_coerce(value), 4, NR_CPUS);
+		for (k = 0; k < 4; ++k) 
+			if (test_bit(k, (unsigned long *)&c))
+				cpu_set(k, value);
 	}
 out:
 	*ret = value;
@@ -848,12 +851,15 @@ out:
 
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
-static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
-	unsigned long mask = 1UL<<cpu_logical_id(hwid);
+	cpumask_t mask = CPU_MASK_NONE;
+
+	cpu_set(cpu_logical_id(hwid), mask);
 
 	if (irq < NR_IRQS) {
 		irq_affinity[irq] = mask;
@@ -864,10 +870,17 @@ void set_irq_affinity_info (unsigned int
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
+	int k, len;
 	if (count < HEX_DIGITS+3)
 		return -EINVAL;
-	return sprintf (page, "%s%08lx\n", irq_redir[(unsigned long)data] ? "r " : "",
-			irq_affinity[(unsigned long)data]);
+
+	len = 0;
+	for (k = 0; k < CPU_ARRAY_SIZE; ++k) {
+		int j = sprintf(page, "%08lx\n", irq_affinity[(long)data].mask[k]);
+		len +=j;
+		page +=j;
+	}
+	return len;
 }
 
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
@@ -875,7 +888,7 @@ static int irq_affinity_write_proc (stru
 {
 	unsigned int irq = (unsigned long) data;
 	int full_count = count, err;
-	unsigned long new_value;
+	cpumask_t new_value, tmp;
 	const char *buf = buffer;
 	int redir;
 
@@ -898,10 +911,12 @@ static int irq_affinity_write_proc (stru
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!(new_value & cpu_online_map))
+	cpus_and(tmp, new_value, cpu_online_map);
+	if (cpus_empty(tmp))
 		return -EINVAL;
 
-	irq_desc(irq)->handler->set_affinity(irq | (redir? IA64_IRQ_REDIRECTED : 0), new_value);
+	irq_desc(irq)->handler->set_affinity(irq | (redir? IA64_IRQ_REDIRECTED : 0), 
+					     (unsigned long)&new_value);
 
 	return full_count;
 }
@@ -911,18 +926,25 @@ static int irq_affinity_write_proc (stru
 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
+	cpumask_t *mask = (cpumask_t *)data;
+	int k, len = 0;
+
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08lx\n", *mask);
+	for (k = 0; k < CPU_ARRAY_SIZE; ++k) {
+		int j = sprintf(page, "%08lx\n", mask->mask[k]);
+		len += j;
+		page += j;
+	}
+	return len;
 }
 
 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	unsigned long *mask = (unsigned long *) data;
-	int full_count = count, err;
-	unsigned long new_value;
+	cpumask_t *mask = (cpumask_t *)data;
+	unsigned long full_count = count, err;
+	cpumask_t new_value;
 
 	err = parse_hex_value(buffer, count, &new_value);
 	if (err)
@@ -965,7 +987,7 @@ static void register_irq_proc (unsigned 
 #endif
 }
 
-unsigned long prof_cpu_mask = -1;
+cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 
 void init_irq_proc (void)
 {
diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/perfmon.c cpu-2.5.67-bk6-1/arch/ia64/kernel/perfmon.c
--- linux-2.5.67-bk6/arch/ia64/kernel/perfmon.c	2003-04-15 14:37:52.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/perfmon.c	2003-04-15 15:15:52.000000000 -0700
@@ -265,7 +265,7 @@ typedef struct pfm_context {
 	pfm_counter_t		ctx_soft_pmds[IA64_NUM_PMD_REGS]; /* XXX: size should be dynamic */
 
 	u64			ctx_saved_psr;		/* copy of psr used for lazy ctxsw */
-	unsigned long		ctx_saved_cpus_allowed;	/* copy of the task cpus_allowed (system wide) */
+	cpumask_t		ctx_saved_cpus_allowed;	/* copy of the task cpus_allowed (system wide) */
 	unsigned int		ctx_cpu;		/* CPU used by system wide session */
 
 	atomic_t		ctx_last_cpu;		/* CPU id of current or last CPU used */
@@ -909,9 +909,9 @@ error_kmalloc:
 }
 
 static int
-pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
+pfm_reserve_session(struct task_struct *task, int is_syswide, cpumask_t cpu_mask)
 {
-	unsigned long m, undo_mask;
+	cpumask_t m, undo_mask;
 	unsigned int n, i;
 
 	/*
@@ -929,18 +929,20 @@ pfm_reserve_session(struct task_struct *
 			goto abort;
 		}
 
-		m = cpu_mask; undo_mask = 0UL; n = 0;
+		m = cpu_mask;
+		cpus_clear(undo_mask);
+		n = 0;
 		DBprintk(("cpu_mask=0x%lx\n", cpu_mask));
-		for(i=0; m; i++, m>>=1) {
+		for(i = 0; !cpus_empty(m); i++, cpus_shift_right(m, m, 1)) {
 
-			if ((m & 0x1) == 0UL) continue;
+			if (!cpu_isset(0, m)) continue;
 
 			if (pfm_sessions.pfs_sys_session[i]) goto undo;
 
 			DBprintk(("reserving CPU%d currently on CPU%d\n", i, smp_processor_id()));
 
 			pfm_sessions.pfs_sys_session[i] = task;
-			undo_mask |= 1UL << i;
+			cpu_set(i, undo_mask);
 			n++;
 		}
 		pfm_sessions.pfs_sys_sessions += n;
@@ -957,7 +959,7 @@ undo:
 	DBprintk(("system wide not possible, conflicting session [%d] on CPU%d\n",
   		pfm_sessions.pfs_sys_session[i]->pid, i));
 
-	for(i=0; undo_mask; i++, undo_mask >>=1) {
+	for(i=0; !cpus_empty(undo_mask); i++, cpus_shift_right(undo_mask, undo_mask, 1)) {
 		pfm_sessions.pfs_sys_session[i] = NULL;
 	}
 abort:
@@ -968,10 +970,10 @@ abort:
 }
 
 static int
-pfm_unreserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
+pfm_unreserve_session(struct task_struct *task, int is_syswide, cpumask_t cpu_mask)
 {
 	pfm_context_t *ctx;
-	unsigned long m;
+	cpumask_t m;
 	unsigned int n, i;
 
 	ctx = task ? task->thread.pfm_context : NULL;
@@ -981,19 +983,11 @@ pfm_unreserve_session(struct task_struct
 	 */
 	LOCK_PFS();
 
-	DBprintk(("[%d] sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu_mask=0x%lx\n",
-		task->pid,
-		pfm_sessions.pfs_sys_sessions,
-		pfm_sessions.pfs_task_sessions,
-		pfm_sessions.pfs_sys_use_dbregs,
-		is_syswide,
-		cpu_mask));
-
 
 	if (is_syswide) {
 		m = cpu_mask; n = 0;
-		for(i=0; m; i++, m>>=1) {
-			if ((m & 0x1) == 0UL) continue;
+		for(i=0; !cpus_empty(m); i++, cpus_shift_right(m, m, 1)) {
+			if (!cpu_isset(0, m)) continue;
 			pfm_sessions.pfs_sys_session[i] = NULL;
 			n++;
 		}
@@ -1040,6 +1034,7 @@ static int
 pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 {
 	unsigned long smpl_pmds = pfx->ctx_smpl_regs[0];
+	cpumask_t tmp;
 	int ctx_flags;
 	int cpu;
 
@@ -1058,7 +1053,6 @@ pfx_is_sane(struct task_struct *task, pf
 	}
 
 	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
-		DBprintk(("cpu_mask=0x%lx\n", pfx->ctx_cpu_mask));
 		/*
 		 * cannot block in this mode 
 		 */
@@ -1069,24 +1063,25 @@ pfx_is_sane(struct task_struct *task, pf
 		/*
 		 * must only have one bit set in the CPU mask
 		 */
-		if (hweight64(pfx->ctx_cpu_mask) != 1UL) {
+		if (cpus_weight(pfx->ctx_cpu_mask) != 1UL) {
 			DBprintk(("invalid CPU mask specified\n"));
 			return -EINVAL;
 		}
 		/*
 		 * and it must be a valid CPU
 		 */
-		cpu = ffz(~pfx->ctx_cpu_mask);
-		if (cpu_online(cpu) == 0) {
+		tmp = pfx->ctx_cpu_mask;
+		/* ffz(~mask) is the first *set* bit; do not complement */
+		cpu = first_cpu(tmp);
+		if (!cpu_online(cpu)) {
 			DBprintk(("CPU%d is not online\n", cpu));
 			return -EINVAL;
 		}
 		/*
 		 * check for pre-existing pinning, if conflicting reject
 		 */
-		if (task->cpus_allowed != ~0UL && (task->cpus_allowed & (1UL<<cpu)) == 0) {
-			DBprintk(("[%d] pinned on 0x%lx, mask for CPU%d \n", task->pid, 
-				task->cpus_allowed, cpu));
+		if (cpus_weight(task->cpus_allowed) != NR_CPUS &&
+		    !cpu_isset(cpu, task->cpus_allowed)) {
 			return -EINVAL;
 		}
 
@@ -1125,6 +1120,7 @@ pfm_context_create(struct task_struct *t
 	int ret;
 	int ctx_flags;
 	pid_t notify_pid;
+	cpumask_t tmpmask;
 
 	/* a context has already been defined */
 	if (ctx) return -EBUSY;
@@ -1238,7 +1234,9 @@ pfm_context_create(struct task_struct *t
 	ctx->ctx_fl_protected = 0;
 
 	/* for system wide mode only (only 1 bit set) */
-	ctx->ctx_cpu = ffz(~tmp.ctx_cpu_mask);
+	tmpmask = tmp.ctx_cpu_mask;
+	/* first set bit of the mask (equivalent of ffz(~mask)) */
+	ctx->ctx_cpu = first_cpu(tmpmask);
 
 	atomic_set(&ctx->ctx_last_cpu,-1); /* SMP only, means no CPU */
 
@@ -1268,7 +1266,6 @@ pfm_context_create(struct task_struct *t
 	if (ctx->ctx_fl_system) {
 		ctx->ctx_saved_cpus_allowed = task->cpus_allowed;
 		set_cpus_allowed(task, tmp.ctx_cpu_mask);
-		DBprintk(("[%d] rescheduled allowed=0x%lx\n", task->pid, task->cpus_allowed));
 	}
 
 	return 0;
@@ -3148,7 +3145,7 @@ pfm_proc_info(char *page)
 	p += sprintf(p, "ovfl_mask              : 0x%lx\n", pmu_conf.ovfl_val);
 
 	for(i=0; i < NR_CPUS; i++) {
-		if (cpu_online(i) == 0) continue;
+		if (!cpu_online(i)) continue;
 		p += sprintf(p, "CPU%-2d overflow intrs   : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
 		p += sprintf(p, "CPU%-2d spurious intrs   : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
 		p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
@@ -3779,15 +3776,9 @@ pfm_inherit(struct task_struct *task, st
 	/*
 	 * clear cpu pinning restriction for child
 	 */
-	if (ctx->ctx_fl_system) {
+	if (ctx->ctx_fl_system)
 		set_cpus_allowed(task, ctx->ctx_saved_cpus_allowed);
 
-	 	DBprintk(("setting cpus_allowed for [%d] to 0x%lx from 0x%lx\n", 
-			task->pid,
-			ctx->ctx_saved_cpus_allowed, 
-			current->cpus_allowed));
-	}
-
 	/*
 	 * takes care of easiest case first
 	 */
@@ -3934,6 +3925,7 @@ void
 pfm_context_exit(struct task_struct *task)
 {
 	pfm_context_t *ctx = task->thread.pfm_context;
+	cpumask_t mask = CPU_MASK_NONE;
 
 	/*
 	 * check sampling buffer
@@ -4033,7 +4025,8 @@ pfm_context_exit(struct task_struct *tas
 	UNLOCK_CTX(ctx);
 	preempt_enable();
 
-	pfm_unreserve_session(task, ctx->ctx_fl_system, 1UL << ctx->ctx_cpu);
+	cpu_set(ctx->ctx_cpu, mask);
+	pfm_unreserve_session(task, ctx->ctx_fl_system, mask);
 
 	if (ctx->ctx_fl_system) {
 		/*
diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/setup.c cpu-2.5.67-bk6-1/arch/ia64/kernel/setup.c
--- linux-2.5.67-bk6/arch/ia64/kernel/setup.c	2003-04-07 10:30:43.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/setup.c	2003-04-15 14:39:40.000000000 -0700
@@ -541,7 +541,7 @@ static void *
 c_start (struct seq_file *m, loff_t *pos)
 {
 #ifdef CONFIG_SMP
-	while (*pos < NR_CPUS && !(cpu_online_map & (1UL << *pos)))
+	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
 		++*pos;
 #endif
 	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/smp.c cpu-2.5.67-bk6-1/arch/ia64/kernel/smp.c
--- linux-2.5.67-bk6/arch/ia64/kernel/smp.c	2003-04-07 10:32:58.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/smp.c	2003-04-15 14:39:40.000000000 -0700
@@ -81,7 +81,7 @@ stop_this_cpu (void)
 	/*
 	 * Remove this CPU:
 	 */
-	clear_bit(smp_processor_id(), &cpu_online_map);
+	cpu_clear(smp_processor_id(), cpu_online_map);
 	max_xtp();
 	local_irq_disable();
 	cpu_halt();
diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/smpboot.c cpu-2.5.67-bk6-1/arch/ia64/kernel/smpboot.c
--- linux-2.5.67-bk6/arch/ia64/kernel/smpboot.c	2003-04-15 14:37:52.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/smpboot.c	2003-04-15 14:39:40.000000000 -0700
@@ -79,13 +79,13 @@ int cpucount;
 task_t *task_for_booting_cpu;
 
 /* Bitmask of currently online CPUs */
-volatile unsigned long cpu_online_map;
-unsigned long phys_cpu_present_map;
+volatile cpumask_t cpu_online_map;
+cpumask_t phys_cpu_present_map;
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
 
-static volatile unsigned long cpu_callin_map;
+static volatile cpumask_t cpu_callin_map;
 
 struct smp_boot_data smp_boot_data __initdata;
 
@@ -271,7 +271,7 @@ smp_callin (void)
 	cpuid = smp_processor_id();
 	phys_id = hard_smp_processor_id();
 
-	if (test_and_set_bit(cpuid, &cpu_online_map)) {
+	if (cpu_test_and_set(cpuid, cpu_online_map)) {
 		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
 		       phys_id, cpuid);
 		BUG();
@@ -313,7 +313,7 @@ smp_callin (void)
 	/*
 	 * Allow the master to continue.
 	 */
-	set_bit(cpuid, &cpu_callin_map);
+	cpu_set(cpuid, cpu_callin_map);
 	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
 }
 
@@ -376,19 +376,19 @@ do_boot_cpu (int sapicid, int cpu)
 	 */
 	Dprintk("Waiting on callin_map ...");
 	for (timeout = 0; timeout < 100000; timeout++) {
-		if (test_bit(cpu, &cpu_callin_map))
+		if (cpu_isset(cpu, cpu_callin_map))
 			break;  /* It has booted */
 		udelay(100);
 	}
 	Dprintk("\n");
 
-	if (test_bit(cpu, &cpu_callin_map)) {
+	if (cpu_isset(cpu, cpu_callin_map)) {
 		/* number CPUs logically, starting from 1 (BSP is 0) */
 		printk(KERN_INFO "CPU%d: CPU has booted.\n", cpu);
 	} else {
 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
 		ia64_cpu_to_sapicid[cpu] = -1;
-		clear_bit(cpu, &cpu_online_map);  /* was set in smp_callin() */
+		cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
 		return -EINVAL;
 	}
 	return 0;
@@ -418,13 +418,14 @@ smp_build_cpu_map (void)
 		ia64_cpu_to_sapicid[cpu] = -1;
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
-	phys_cpu_present_map = 1;
+	cpus_clear(phys_cpu_present_map);
+	cpu_set(0, phys_cpu_present_map);
 
 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
 		sapicid = smp_boot_data.cpu_phys_id[i];
 		if (sapicid == -1 || sapicid == boot_cpu_id)
 			continue;
-		phys_cpu_present_map |= (1 << cpu);
+		cpu_set(cpu, phys_cpu_present_map);
 		ia64_cpu_to_sapicid[cpu] = sapicid;
 		cpu++;
 	}
@@ -435,7 +436,7 @@ smp_build_cpu_map (void)
 /* on which node is each logical CPU (one cacheline even for 64 CPUs) */
 volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 /* which logical CPUs are on which nodes */
-volatile unsigned long node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+volatile cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 
 /*
  * Build cpu to node mapping and initialize the per node cpu masks.
@@ -446,7 +447,7 @@ build_cpu_to_node_map (void)
 	int cpu, i, node;
 
 	for(node=0; node<MAX_NUMNODES; node++)
-		node_to_cpu_mask[node] = 0;
+		cpus_clear(node_to_cpu_mask[node]);
 	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
 		/*
 		 * All Itanium NUMA platforms I know use ACPI, so maybe we
@@ -464,7 +465,7 @@ build_cpu_to_node_map (void)
 #endif
 		cpu_to_node_map[cpu] = node;
 		if (node >= 0)
-			node_to_cpu_mask[node] |= (1UL << cpu);
+			cpu_set(cpu, node_to_cpu_mask[node]);
 	}
 }
 
@@ -487,8 +488,8 @@ smp_prepare_cpus (unsigned int max_cpus)
 	/*
 	 * We have the boot CPU online for sure.
 	 */
-	set_bit(0, &cpu_online_map);
-	set_bit(0, &cpu_callin_map);
+	cpu_set(0, cpu_online_map);
+	cpu_set(0, cpu_callin_map);
 
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -503,15 +504,18 @@ smp_prepare_cpus (unsigned int max_cpus)
 	 */
 	if (!max_cpus) {
 		printk(KERN_INFO "SMP mode deactivated.\n");
-		cpu_online_map = phys_cpu_present_map = 1;
+		cpus_clear(cpu_online_map);
+		cpus_clear(phys_cpu_present_map);
+		cpu_set(0, cpu_online_map);
+		cpu_set(0, phys_cpu_present_map);
 		return;
 	}
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	set_bit(smp_processor_id(), &cpu_online_map);
-	set_bit(smp_processor_id(), &cpu_callin_map);
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), cpu_callin_map);
 }
 
 void
diff -urpN linux-2.5.67-bk6/arch/ia64/kernel/time.c cpu-2.5.67-bk6-1/arch/ia64/kernel/time.c
--- linux-2.5.67-bk6/arch/ia64/kernel/time.c	2003-04-15 14:37:52.000000000 -0700
+++ cpu-2.5.67-bk6-1/arch/ia64/kernel/time.c	2003-04-15 14:39:40.000000000 -0700
@@ -38,13 +38,13 @@ unsigned long last_cli_ip;
 static void
 do_profile (unsigned long ip)
 {
-	extern unsigned long prof_cpu_mask;
+	extern cpumask_t prof_cpu_mask;
 	extern char _stext;
 
 	if (!prof_buffer)
 		return;
 
-	if (!((1UL << smp_processor_id()) & prof_cpu_mask))
+	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
 		return;
 
 	ip -= (unsigned long) &_stext;
diff -urpN linux-2.5.67-bk6/include/asm-ia64/perfmon.h cpu-2.5.67-bk6-1/include/asm-ia64/perfmon.h
--- linux-2.5.67-bk6/include/asm-ia64/perfmon.h	2003-04-07 10:31:45.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-ia64/perfmon.h	2003-04-15 14:39:40.000000000 -0700
@@ -71,7 +71,7 @@ typedef struct {
 	int	      ctx_flags;	/* noblock/block, inherit flags */
 	void	      *ctx_smpl_vaddr;	/* returns address of BTB buffer */
 
-	unsigned long ctx_cpu_mask;	/* on which CPU to enable perfmon (systemwide) */
+	cpumask_t     ctx_cpu_mask;	/* on which CPU to enable perfmon (systemwide) */
 
 	unsigned long reserved[8];	/* for future use */
 } pfarg_context_t;
diff -urpN linux-2.5.67-bk6/include/asm-ia64/smp.h cpu-2.5.67-bk6-1/include/asm-ia64/smp.h
--- linux-2.5.67-bk6/include/asm-ia64/smp.h	2003-04-15 14:38:01.000000000 -0700
+++ cpu-2.5.67-bk6-1/include/asm-ia64/smp.h	2003-04-15 14:39:40.000000000 -0700
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/kernel.h>
+#include <linux/cpumask.h>
 
 #include <asm/bitops.h>
 #include <asm/io.h>
@@ -37,8 +38,8 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __initdata;
 
-extern unsigned long phys_cpu_present_map;
-extern volatile unsigned long cpu_online_map;
+extern cpumask_t phys_cpu_present_map;
+extern volatile cpumask_t cpu_online_map;
 extern unsigned long ipi_base_addr;
 extern unsigned char smp_int_redirect;
 
@@ -47,23 +48,6 @@ extern volatile int ia64_cpu_to_sapicid[
 
 extern unsigned long ap_wakeup_vector;
 
-#define cpu_possible(cpu)	(phys_cpu_present_map & (1UL << (cpu)))
-#define cpu_online(cpu)		(cpu_online_map & (1UL << (cpu)))
-
-static inline unsigned int
-num_online_cpus (void)
-{
-	return hweight64(cpu_online_map);
-}
-
-static inline int
-any_online_cpu (unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-	return -1;
-}
-
 /*
  * Function to map hard smp processor id to logical id.  Slow, so don't use this in
  * performance-critical code.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [cpumask_t 1/3] core changes for 2.5.67-bk6
  2003-04-15 22:50 [cpumask_t 1/3] core changes for 2.5.67-bk6 William Lee Irwin III
  2003-04-15 22:58 ` [cpumask_t 2/3] i386 " William Lee Irwin III
@ 2003-04-18 17:20 ` Randy.Dunlap
  2003-04-18 17:51   ` William Lee Irwin III
  1 sibling, 1 reply; 5+ messages in thread
From: Randy.Dunlap @ 2003-04-18 17:20 UTC (permalink / raw)
  To: William Lee Irwin III; +Cc: linux-kernel

On Tue, 15 Apr 2003 15:50:36 -0700 William Lee Irwin III <wli@holomorphy.com> wrote:

| Core changes for extended cpu masks. Basically use a machine word
| #if NR_CPUS < BITS_PER_LONG, otherwise, use a structure with an array
| of unsigned longs for it. Sprinkle it around the scheduler and a few
| other odd places that play with the cpu bitmasks. Back-ended by a
| bitmap ADT capable of dealing with arbitrary-width bitmaps, with the
| obvious micro-optimizations for NR_CPUS < BITS_PER_LONG and UP.
| 
| NR_CPUS % BITS_PER_LONG != 0 is invalid while NR_CPUS > BITS_PER_LONG.

Where/why this restriction (above)?
I don't see the need for it or implementation of it.

I'm only looking at the core patch.


| diff -urpN linux-2.5.67-bk6/include/linux/bitmap.h cpu-2.5.67-bk6-1/include/linux/bitmap.h
| --- linux-2.5.67-bk6/include/linux/bitmap.h	1969-12-31 16:00:00.000000000 -0800
| +++ cpu-2.5.67-bk6-1/include/linux/bitmap.h	2003-04-15 14:39:40.000000000 -0700

| +static inline void bitmap_shift_left(volatile unsigned long *,volatile unsigned long *,int,int);

Do you need this prototype?  I don't see why.

Rest of core looks good to me.

--
~Randy

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [cpumask_t 1/3] core changes for 2.5.67-bk6
  2003-04-18 17:20 ` [cpumask_t 1/3] core " Randy.Dunlap
@ 2003-04-18 17:51   ` William Lee Irwin III
  0 siblings, 0 replies; 5+ messages in thread
From: William Lee Irwin III @ 2003-04-18 17:51 UTC (permalink / raw)
  To: Randy.Dunlap; +Cc: linux-kernel

On Tue, 15 Apr 2003 15:50:36 -0700 William Lee Irwin III wrote:
> | Core changes for extended cpu masks. Basically use a machine word
> | #if NR_CPUS < BITS_PER_LONG, otherwise, use a structure with an array
> | of unsigned longs for it. Sprinkle it around the scheduler and a few
> | other odd places that play with the cpu bitmasks. Back-ended by a
> | bitmap ADT capable of dealing with arbitrary-width bitmaps, with the
> | obvious micro-optimizations for NR_CPUS < BITS_PER_LONG and UP.
> | NR_CPUS % BITS_PER_LONG != 0 is invalid while NR_CPUS > BITS_PER_LONG.

On Fri, Apr 18, 2003 at 10:20:15AM -0700, Randy.Dunlap wrote:
> Where/why this restriction (above)?
> I don't see the need for it or implementation of it.
> I'm only looking at the core patch.

I leave bits dangling otherwise.


On Tue, 15 Apr 2003 15:50:36 -0700 William Lee Irwin III wrote:
> | +static inline void bitmap_shift_left(volatile unsigned long *,volatile unsigned long *,int,int);

On Fri, Apr 18, 2003 at 10:20:15AM -0700, Randy.Dunlap wrote:
> Do you need this prototype?  I don't see why.
> Rest of core looks good to me.

Probably not. I'll nuke it.


-- wli

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2003-04-18 17:40 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2003-04-15 22:50 [cpumask_t 1/3] core changes for 2.5.67-bk6 William Lee Irwin III
2003-04-15 22:58 ` [cpumask_t 2/3] i386 " William Lee Irwin III
2003-04-15 23:08   ` [cpumask_t 3/3] ia64 " William Lee Irwin III
2003-04-18 17:20 ` [cpumask_t 1/3] core " Randy.Dunlap
2003-04-18 17:51   ` William Lee Irwin III

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox