diff -ruN linux-2.4.27-pre3/fs/proc/array.c linux-2.4.27-pre3.up-opt/fs/proc/array.c --- linux-2.4.27-pre3/fs/proc/array.c 2003-11-29 00:28:14.000000000 +0100 +++ linux-2.4.27-pre3.up-opt/fs/proc/array.c 2004-05-19 21:53:22.000000000 +0200 @@ -391,7 +391,7 @@ task->nswap, task->cnswap, task->exit_signal, - task->processor); + task_cpu(task)); if(mm) mmput(mm); return res; diff -ruN linux-2.4.27-pre3/include/linux/sched.h linux-2.4.27-pre3.up-opt/include/linux/sched.h --- linux-2.4.27-pre3/include/linux/sched.h 2004-05-19 21:25:49.000000000 +0200 +++ linux-2.4.27-pre3.up-opt/include/linux/sched.h 2004-05-19 21:53:22.000000000 +0200 @@ -560,17 +560,77 @@ return p; } +/* + * Wrappers for p->processor/cpus_allowed/cpus_runnable access. No-op on UP. + */ +#ifdef CONFIG_SMP + +static inline int task_cpu(const struct task_struct *p) +{ + return p->processor; +} + +static inline void set_task_cpu(struct task_struct *p, int cpu) +{ + p->processor = cpu; +} + +static inline unsigned long task_cpus_allowed(const struct task_struct *p) +{ + return p->cpus_allowed; +} + +static inline void set_task_cpus_allowed(struct task_struct *p, + unsigned long cpus_allowed) +{ + p->cpus_allowed = cpus_allowed; +} + +static inline void set_task_cpus_runnable(struct task_struct *p, + unsigned long cpus_runnable) +{ + p->cpus_runnable = cpus_runnable; +} + +#else + +static inline int task_cpu(const struct task_struct *p) +{ + return 0; +} + +static inline void set_task_cpu(struct task_struct *p, int cpu) +{ +} + +static inline unsigned long task_cpus_allowed(const struct task_struct *p) +{ + return ~0UL; +} + +static inline void set_task_cpus_allowed(struct task_struct *p, + unsigned long cpus_allowed) +{ +} + +static inline void set_task_cpus_runnable(struct task_struct *p, + unsigned long cpus_runnable) +{ +} + +#endif /* CONFIG_SMP */ + #define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL) static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu) { - tsk->processor = 
cpu; - tsk->cpus_runnable = 1UL << cpu; + set_task_cpu(tsk, cpu); + set_task_cpus_runnable(tsk, 1UL << cpu); } static inline void task_release_cpu(struct task_struct *tsk) { - tsk->cpus_runnable = ~0UL; + set_task_cpus_runnable(tsk, ~0UL); } /* per-UID process charging. */ diff -ruN linux-2.4.27-pre3/kernel/sched.c linux-2.4.27-pre3.up-opt/kernel/sched.c --- linux-2.4.27-pre3/kernel/sched.c 2003-11-29 00:28:15.000000000 +0100 +++ linux-2.4.27-pre3.up-opt/kernel/sched.c 2004-05-19 21:53:22.000000000 +0200 @@ -359,7 +359,7 @@ if (task_on_runqueue(p)) goto out; add_to_runqueue(p); - if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id()))) + if (!synchronous || !(task_cpus_allowed(p) & (1UL << smp_processor_id()))) reschedule_idle(p); success = 1; out: @@ -557,7 +557,7 @@ BUG_ON(!current->active_mm); need_resched_back: prev = current; - this_cpu = prev->processor; + this_cpu = task_cpu(prev); if (unlikely(in_interrupt())) { printk("Scheduling in interrupt\n"); @@ -1364,7 +1364,7 @@ } sched_data->curr = current; sched_data->last_schedule = get_cycles(); - clear_bit(current->processor, &wait_init_idle); + clear_bit(task_cpu(current), &wait_init_idle); } extern void init_timervecs (void); @@ -1378,7 +1378,7 @@ int cpu = smp_processor_id(); int nr; - init_task.processor = cpu; + set_task_cpu(&init_task, cpu); for(nr = 0; nr < PIDHASH_SZ; nr++) pidhash[nr] = NULL; diff -ruN linux-2.4.27-pre3/kernel/softirq.c linux-2.4.27-pre3.up-opt/kernel/softirq.c --- linux-2.4.27-pre3/kernel/softirq.c 2002-11-30 17:12:32.000000000 +0100 +++ linux-2.4.27-pre3.up-opt/kernel/softirq.c 2004-05-19 21:53:22.000000000 +0200 @@ -368,7 +368,7 @@ sigfillset(&current->blocked); /* Migrate to the right CPU */ - current->cpus_allowed = 1UL << cpu; + set_task_cpus_allowed(current, 1UL << cpu); while (smp_processor_id() != cpu) schedule();