core.c: c5b2803840817115e9b568d5054e5007ae36176b -> 6c37067e27867db172b988cc11b9ff921175dee5
 /*
  * kernel/sched/core.c
  *
  * Kernel scheduler and related syscalls
  *
  * Copyright (C) 1991-2002  Linus Torvalds
  *
  * 1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and

--- 1149 unchanged lines hidden (view full) ---

 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
 	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }

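Aside, not part of the diff: set_cpus_allowed_common() above is the default
implementation of the sched_class::set_cpus_allowed() hook. A hedged sketch of
how a class wires it up, modeled on kernel/sched/fair.c in mainline around
this commit (all other methods elided):

/*
 * Sketch only: the fair class points its hook straight at the common
 * helper; classes that keep extra per-class state (rt, deadline) can
 * wrap it in their own callback instead.
 */
const struct sched_class fair_sched_class = {
	/* ... other methods elided ... */
#ifdef CONFIG_SMP
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif
};
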
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
+	struct rq *rq = task_rq(p);
+	bool queued, running;
+
 	lockdep_assert_held(&p->pi_lock);
+
+	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
+
+	if (queued) {
+		/*
+		 * Because __kthread_bind() calls this on blocked tasks without
+		 * holding rq->lock.
+		 */
+		lockdep_assert_held(&rq->lock);
+		dequeue_task(rq, p, 0);
+	}
+	if (running)
+		put_prev_task(rq, p);
+
 	p->sched_class->set_cpus_allowed(p, new_mask);
+
+	if (running)
+		p->sched_class->set_curr_task(rq);
+	if (queued)
+		enqueue_task(rq, p, 0);
 }
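
Aside, not part of the diff: the lockdep assertions above define the calling
context this change introduces. A minimal sketch of a caller that satisfies
them, assuming the task_rq_lock()/task_rq_unlock() helpers of this era; the
wrapper name example_set_affinity is hypothetical and the migration step that
set_cpus_allowed_ptr() performs is elided:

/*
 * task_rq_lock() acquires both p->pi_lock and the runqueue lock, so a
 * queued task can safely be dequeued, have its affinity changed and be
 * re-enqueued by do_set_cpus_allowed().
 */
static void example_set_affinity(struct task_struct *p,
				 const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	do_set_cpus_allowed(p, new_mask);
	task_rq_unlock(rq, p, &flags);
}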

 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
  * is removed from the allowed bitmask.
  *
  * NOTE: the caller must have a valid reference to the task, the

--- 7319 unchanged lines hidden ---