--- linux/kernel/sched.c.orig	Wed Nov 21 11:12:05 2001
+++ linux/kernel/sched.c	Wed Nov 21 11:44:41 2001
@@ -1112,6 +1112,132 @@
 	return retval;
 }
 
+/*
+ * sys_sched_set_affinity - Set the CPU affinity mask.
+ *
+ * @pid: the PID of the process
+ * @mask_len: length of the bitfield, in bytes
+ * @new_mask_ptr: user-space pointer to the new CPU mask bitfield
+ */
+asmlinkage int sys_sched_set_affinity(pid_t pid, unsigned int mask_len, unsigned long *new_mask_ptr)
+{
+	int ret, reschedule = 0;
+	unsigned long new_mask;
+	struct task_struct *p;
+
+	/*
+	 * Right now we support an 'unsigned long' bitmask - this can
+	 * be extended without changing the syscall interface.
+	 */
+	if (mask_len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, new_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	new_mask &= cpu_online_map;
+	if (!new_mask)
+		return -EINVAL;
+
+	read_lock_irq(&tasklist_lock);
+	spin_lock(&runqueue_lock);
+
+	ret = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	ret = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+				!capable(CAP_SYS_NICE))
+		goto out_unlock;
+	p->cpus_allowed = new_mask;
+	if (!(p->cpus_runnable & p->cpus_allowed)) {
+		if (p == current)
+			reschedule = 1;
+#ifdef CONFIG_SMP
+		else {
+			/*
+			 * If running on a different CPU then
+			 * trigger a reschedule to get the process
+			 * moved to a legal CPU:
+			 */
+			p->need_resched = 1;
+			smp_send_reschedule(p->processor);
+		}
+#endif
+	}
+	ret = 0;
+out_unlock:
+	spin_unlock(&runqueue_lock);
+	read_unlock_irq(&tasklist_lock);
+
+	/*
+	 * Reschedule once if the current CPU is not in
+	 * the affinity mask. (do the reschedule here so
+	 * that kernel-internal processes can call this
+	 * interface as well.)
+	 */
+	if (reschedule)
+		schedule();
+
+	return ret;
+}
+
+/*
+ * sys_sched_get_affinity - Get the CPU affinity mask.
+ *
+ * @pid: the PID of the process
+ * @user_mask_len_ptr: user-space pointer to the length of the bitfield
+ * @user_mask_ptr: user-space pointer to the CPU mask bitfield
+ */
+asmlinkage int sys_sched_get_affinity(pid_t pid, unsigned int *user_mask_len_ptr, unsigned long *user_mask_ptr)
+{
+	unsigned int mask_len, user_mask_len;
+	unsigned long mask;
+	struct task_struct *p;
+	int ret;
+
+	mask_len = sizeof(mask);
+
+	if (copy_from_user(&user_mask_len, user_mask_len_ptr, sizeof(user_mask_len)))
+		return -EFAULT;
+	if (copy_to_user(user_mask_len_ptr, &mask_len, sizeof(mask_len)))
+		return -EFAULT;
+	/*
+	 * Exit if we cannot copy the full bitmask into user-space.
+	 * But above we have copied the desired mask length to user-space
+	 * already, so user-space has a chance to fix up.
+	 */
+	if (user_mask_len < mask_len)
+		return -EINVAL;
+
+	read_lock_irq(&tasklist_lock);
+	spin_lock(&runqueue_lock);
+
+	ret = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	ret = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+				!capable(CAP_SYS_NICE))
+		goto out_unlock;
+
+	mask = p->cpus_allowed & cpu_online_map;
+	ret = 0;
+out_unlock:
+	spin_unlock(&runqueue_lock);
+	read_unlock_irq(&tasklist_lock);
+
+	if (ret)
+		return ret;
+	if (copy_to_user(user_mask_ptr, &mask, sizeof(mask)))
+		return -EFAULT;
+	return 0;
+}
+
 static void show_task(struct task_struct * p)
 {
 	unsigned long free = 0;
--- linux/kernel/softirq.c.orig	Wed Nov 21 11:12:05 2001
+++ linux/kernel/softirq.c	Wed Nov 21 11:24:10 2001
@@ -363,15 +363,17 @@
 {
 	int bind_cpu = (int) (long) __bind_cpu;
 	int cpu = cpu_logical_map(bind_cpu);
+	unsigned long cpu_mask = 1UL << cpu;
 
 	daemonize();
 	current->nice = 19;
 	sigfillset(&current->blocked);
 
 	/* Migrate to the right CPU */
-	current->cpus_allowed = 1UL << cpu;
-	while (smp_processor_id() != cpu)
-		schedule();
+	if (sys_sched_set_affinity(0, sizeof(cpu_mask), &cpu_mask))
+		BUG();
+	if (smp_processor_id() != cpu)
+		BUG();
 
 	sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
--- linux/include/linux/sched.h.orig	Wed Nov 21 11:19:56 2001
+++ linux/include/linux/sched.h	Wed Nov 21 11:39:36 2001
@@ -589,6 +589,8 @@
 #define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
 #define wake_up_interruptible_sync_nr(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
 asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
+asmlinkage int sys_sched_set_affinity(pid_t pid, unsigned int mask_len, unsigned long *new_mask_ptr);
+asmlinkage int sys_sched_get_affinity(pid_t pid, unsigned int *user_mask_len_ptr, unsigned long *user_mask_ptr);
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
--- linux/arch/i386/kernel/entry.S.orig	Wed Nov 21 11:12:36 2001
+++ linux/arch/i386/kernel/entry.S	Wed Nov 21 11:35:24 2001
@@ -622,6 +622,8 @@
 	.long SYMBOL_NAME(sys_ni_syscall)	/* Reserved for Security */
 	.long SYMBOL_NAME(sys_gettid)
 	.long SYMBOL_NAME(sys_readahead)	/* 225 */
+	.long SYMBOL_NAME(sys_sched_set_affinity)
+	.long SYMBOL_NAME(sys_sched_get_affinity)
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long SYMBOL_NAME(sys_ni_syscall)
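
For completeness, here is a minimal user-space sketch of how the new interface
would be driven. It is not part of the patch: the syscall numbers 226 and 227
are assumptions read off the entry.S hunk above (sys_readahead occupies slot
225), and since glibc has no wrappers for these calls yet, raw syscall(2) is
used.

/*
 * Hypothetical test program for the new affinity syscalls.
 * Syscall numbers are assumed from the entry.S hunk, where
 * sys_readahead sits in slot 225.
 */
#include <stdio.h>
#include <unistd.h>

#define __NR_sched_set_affinity	226	/* assumed slot */
#define __NR_sched_get_affinity	227	/* assumed slot */

int main(void)
{
	unsigned long mask = 1UL << 0;		/* run on CPU #0 only */
	unsigned int mask_len = sizeof(mask);

	/* pid 0 targets the caller, as in the ksoftirqd hunk above */
	if (syscall(__NR_sched_set_affinity, 0, sizeof(mask), &mask) < 0) {
		perror("sched_set_affinity");
		return 1;
	}

	/* read the mask back; the kernel also rewrites mask_len */
	if (syscall(__NR_sched_get_affinity, 0, &mask_len, &mask) < 0) {
		perror("sched_get_affinity");
		return 1;
	}
	printf("affinity: 0x%lx (kernel mask size: %u bytes)\n",
						mask, mask_len);
	return 0;
}

Note that sys_sched_get_affinity writes the kernel's mask size through the
length pointer before validating it, so a caller that passed too small a
length can re-issue the call with a buffer of the size the kernel reported.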