diff -ruN linux-2.4.21-pre5/fs/proc/array.c linux-2.4.21-pre5.up-opt/fs/proc/array.c
--- linux-2.4.21-pre5/fs/proc/array.c	2002-08-07 00:52:23.000000000 +0200
+++ linux-2.4.21-pre5.up-opt/fs/proc/array.c	2003-02-27 13:02:18.000000000 +0100
@@ -390,7 +390,7 @@
 		task->nswap,
 		task->cnswap,
 		task->exit_signal,
-		task->processor);
+		task_cpu(task));
 	if(mm)
 		mmput(mm);
 	return res;
diff -ruN linux-2.4.21-pre5/include/linux/sched.h linux-2.4.21-pre5.up-opt/include/linux/sched.h
--- linux-2.4.21-pre5/include/linux/sched.h	2003-02-27 12:58:59.000000000 +0100
+++ linux-2.4.21-pre5.up-opt/include/linux/sched.h	2003-02-27 13:02:18.000000000 +0100
@@ -566,11 +566,39 @@
 	return p;
 }
 
+/*
+ * Wrappers for p->processor access. No-op on UP.
+ */
+#ifdef CONFIG_SMP
+
+static inline int task_cpu(const struct task_struct *p)
+{
+	return p->processor;
+}
+
+static inline void set_task_cpu(struct task_struct *p, int cpu)
+{
+	p->processor = cpu;
+}
+
+#else
+
+static inline int task_cpu(const struct task_struct *p)
+{
+	return 0;
+}
+
+static inline void set_task_cpu(struct task_struct *p, int cpu)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
 
 static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
 {
-	tsk->processor = cpu;
+	set_task_cpu(tsk, cpu);
 	tsk->cpus_runnable = 1UL << cpu;
 }
diff -ruN linux-2.4.21-pre5/init/main.c linux-2.4.21-pre5.up-opt/init/main.c
--- linux-2.4.21-pre5/init/main.c	2002-08-07 00:52:26.000000000 +0200
+++ linux-2.4.21-pre5.up-opt/init/main.c	2003-02-27 13:02:18.000000000 +0100
@@ -310,7 +310,7 @@
 	/* Get other processors into their bootup holding patterns. */
 	smp_boot_cpus();
 	wait_init_idle = cpu_online_map;
-	clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */
+	clear_bit(task_cpu(current), &wait_init_idle); /* Don't wait on me! */
 
 	smp_threads_ready=1;
 	smp_commence();
diff -ruN linux-2.4.21-pre5/kernel/fork.c linux-2.4.21-pre5.up-opt/kernel/fork.c
--- linux-2.4.21-pre5/kernel/fork.c	2002-11-30 17:12:32.000000000 +0100
+++ linux-2.4.21-pre5.up-opt/kernel/fork.c	2003-02-27 13:02:18.000000000 +0100
@@ -666,7 +666,7 @@
 {
 	int i;
 	p->cpus_runnable = ~0UL;
-	p->processor = current->processor;
+	set_task_cpu(p, task_cpu(current));
 	/* ?? should we just memset this ?? */
 	for(i = 0; i < smp_num_cpus; i++)
 		p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
diff -ruN linux-2.4.21-pre5/kernel/sched.c linux-2.4.21-pre5.up-opt/kernel/sched.c
--- linux-2.4.21-pre5/kernel/sched.c	2003-02-27 12:58:59.000000000 +0100
+++ linux-2.4.21-pre5.up-opt/kernel/sched.c	2003-02-27 13:02:18.000000000 +0100
@@ -172,7 +172,7 @@
 #ifdef CONFIG_SMP
 	/* Give a largish advantage to the same processor...   */
 	/* (this is equivalent to penalizing other processors) */
-	if (p->processor == this_cpu)
+	if (task_cpu(p) == this_cpu)
 		weight += PROC_CHANGE_PENALTY;
 #endif
 
@@ -221,7 +221,7 @@
 	 * shortcut if the woken up task's last CPU is
 	 * idle now.
	 */
-	best_cpu = p->processor;
+	best_cpu = task_cpu(p);
 	if (can_schedule(p, best_cpu)) {
 		tsk = idle_task(best_cpu);
 		if (cpu_curr(best_cpu) == tsk) {
@@ -295,12 +295,12 @@
 	tsk = target_tsk;
 	if (tsk) {
 		if (oldest_idle != -1ULL) {
-			best_cpu = tsk->processor;
+			best_cpu = task_cpu(tsk);
 			goto send_now_idle;
 		}
 		tsk->need_resched = 1;
-		if (tsk->processor != this_cpu)
-			smp_send_reschedule(tsk->processor);
+		if (task_cpu(tsk) != this_cpu)
+			smp_send_reschedule(task_cpu(tsk));
 	}
 	return;
 
@@ -557,7 +557,7 @@
 	BUG_ON(!current->active_mm);
 need_resched_back:
 	prev = current;
-	this_cpu = prev->processor;
+	this_cpu = task_cpu(prev);
 
 	if (unlikely(in_interrupt())) {
 		printk("Scheduling in interrupt\n");
@@ -1364,7 +1364,7 @@
 	}
 	sched_data->curr = current;
 	sched_data->last_schedule = get_cycles();
-	clear_bit(current->processor, &wait_init_idle);
+	clear_bit(task_cpu(current), &wait_init_idle);
 }
 
 extern void init_timervecs (void);
@@ -1378,7 +1378,7 @@
 	int cpu = smp_processor_id();
 	int nr;
 
-	init_task.processor = cpu;
+	set_task_cpu(&init_task, cpu);
 
 	for(nr = 0; nr < PIDHASH_SZ; nr++)
 		pidhash[nr] = NULL;
diff -ruN linux-2.4.21-pre5/kernel/signal.c linux-2.4.21-pre5.up-opt/kernel/signal.c
--- linux-2.4.21-pre5/kernel/signal.c	2003-02-27 12:58:59.000000000 +0100
+++ linux-2.4.21-pre5.up-opt/kernel/signal.c	2003-02-27 13:02:18.000000000 +0100
@@ -508,8 +508,8 @@
 	 * other than doing an extra (lightweight) IPI interrupt.
 	 */
 	spin_lock(&runqueue_lock);
-	if (task_has_cpu(t) && t->processor != smp_processor_id())
-		smp_send_reschedule(t->processor);
+	if (task_has_cpu(t) && task_cpu(t) != smp_processor_id())
+		smp_send_reschedule(task_cpu(t));
 	spin_unlock(&runqueue_lock);
 #endif /* CONFIG_SMP */
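For reference, a minimal standalone sketch of the wrapper pattern the sched.h hunk introduces. This is not kernel code: "struct task" and the main() driver below are illustrative stand-ins for task_struct, and CONFIG_SMP is simply toggled with -DCONFIG_SMP on the compiler command line. Built for SMP, the helpers read and write the per-task processor field; built for UP, they reduce to constants, so the compiler should be able to drop the field accesses entirely, which is the point of the patch.

/*
 * Standalone sketch of the task_cpu()/set_task_cpu() wrapper pattern.
 * "struct task" is an illustrative stand-in, not the real task_struct.
 * Build with or without -DCONFIG_SMP to compare the two variants.
 */
#include <stdio.h>

struct task {
	int processor;
};

#ifdef CONFIG_SMP
/* SMP: the helpers really access the per-task processor field. */
static inline int task_cpu(const struct task *p)
{
	return p->processor;
}
static inline void set_task_cpu(struct task *p, int cpu)
{
	p->processor = cpu;
}
#else
/* UP: there is only CPU 0, so both helpers reduce to constants and
 * the ->processor accesses can be optimized away. */
static inline int task_cpu(const struct task *p)
{
	(void)p;
	return 0;
}
static inline void set_task_cpu(struct task *p, int cpu)
{
	(void)p;
	(void)cpu;
}
#endif

int main(void)
{
	struct task t = { -1 };

	set_task_cpu(&t, 1);
	printf("task_cpu() reports cpu %d\n", task_cpu(&t));
	return 0;
}

Compiling it both ways (e.g. "gcc -O2 sketch.c" and "gcc -O2 -DCONFIG_SMP sketch.c") lets you compare the generated code: the same call sites either touch ->processor or compile down to the constant 0.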