/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
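/*
 * Worked example (with the usual MAX_PRIO == 140 and MAX_RT_PRIO == 100,
 * so MAX_USER_PRIO / 2 == 20): a nice-0 thread has prio 120 and gets
 * SCALE_PRIO(DEF_SPU_TIMESLICE, 120) == DEF_SPU_TIMESLICE, i.e. roughly
 * 100 msecs.  A nice -20 thread (prio 100) gets 8 * DEF_SPU_TIMESLICE,
 * roughly 800 msecs, and a nice 19 thread (prio 139) gets
 * DEF_SPU_TIMESLICE / 20, clamped to MIN_SPU_TIMESLICE, roughly 5 msecs.
 */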
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

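/*
 * Returns non-zero if @ctx may run on @node, i.e. if the context's
 * cpus_allowed mask intersects the cpumask of that node.
 */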
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

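/*
 * Assign each context in the gang an affinity offset relative to the
 * reference context: contexts preceding it on the affinity list get
 * negative offsets, the reference context gets 0 and the contexts
 * following it get positive offsets.
 */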
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang
					&& spu->ctx->aff_offset == 0)
				available_spus -=
					(spu->ctx->gang->contexts - 1);
			else
				available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

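/*
 * Starting from the gang's reference spu, walk the affinity list
 * forwards (positive @offset) or backwards (negative @offset) counting
 * schedulable spus until @offset is used up, and return the spu found
 * there.  Must be called with the node's list_mutex held.
 */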
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns non-zero if the context belongs to an affinity gang and a
 * reference spu for that gang could be determined, setting up the
 * gang's affinity state on first use.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

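/**
 * spu_get_idle - find and reserve an idle spu for a context
 * @ctx:	spu context to find an spu for
 *
 * Honours gang affinity if the context has it, otherwise searches the
 * nodes starting with the local one.  Returns the spu marked SPU_USED
 * with its channels initialized, or %NULL if no idle spu is available.
 */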
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);

			return spu;
		}
	}

	return NULL;
}

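/*
 * Bind @ctx to @spu if the spu is still free, otherwise put the
 * context back on the runqueue.  Called with ctx->state_mutex held.
 */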
static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/*
	 * Not a candidate for interruptible because it's called either
	 * from the scheduler thread or from spu_deactivate.
	 */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}

static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
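 * @prio:	only consider contexts with a priority better than @prio
 * @node:	node on whose cpus the returned context must be allowed to run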
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

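/*
 * Unbind @ctx from its spu if @force is set or if a context with a
 * priority better than @max_prio is waiting on the runqueue, and hand
 * the freed spu to that waiter.  Returns non-zero if a new context was
 * scheduled onto the spu.
 */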
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/*
					 * This one can't easily be made
					 * interruptible.
					 */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

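/*
 * Called from the scheduler thread for every context that is currently
 * loaded on an spu.  Charges one scheduler tick against the context's
 * time slice and, once the slice is used up (or the context is no
 * longer inside spu_run), tries to preempt it in favour of a waiting
 * context of equal or better priority.  NOSCHED and SCHED_FIFO
 * contexts are never time sliced.
 */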
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
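	/*
	 * CALC_LOAD maintains exponentially decaying averages in FIXED_1
	 * fixed point, just like the cpu loadavg:
	 * avenrun = avenrun * exp + active_tasks * (1 - exp).
	 */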
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

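				/*
				 * spusched_tick() takes ctx->state_mutex and
				 * may re-take list_mutex itself, so drop the
				 * node lock around the call.
				 */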
				if (ctx) {
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

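/*
 * spuctx_switch_state - account a utilization state change
 *
 * Charges the time since the last state change to the state the
 * context (and, if loaded, its spu) has been in, then switches the
 * statistics over to @new_state.  Must be called with ctx->state_mutex
 * held.
 */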
void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
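/*
 * LOAD_INT/LOAD_FRAC split a FIXED_1 fixed-point load value into its
 * integer part and two decimal places; adding FIXED_1/200 below rounds
 * to the nearest hundredth, matching the CPU /proc/loadavg output.
 */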

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}