// SPDX-License-Identifier: GPL-2.0-or-later
/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
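
/*
 * Illustrative sketch, not part of the scheduler proper: the runqueue is
 * a classic O(1) priority array with one list per priority level plus a
 * bitmap, so the highest-priority (lowest-numbered) non-empty list can be
 * found with a single find_first_bit().  A lookup, assuming runq_lock is
 * held, would look roughly like this (hypothetical helper):
 */
#if 0	/* example only, never compiled */
static struct spu_context *peek_highest_prio_ctx(void)
{
	int best = find_first_bit(spu_prio->bitmap, MAX_PRIO);

	if (best >= MAX_PRIO)
		return NULL;	/* no context is waiting */
	return list_first_entry(&spu_prio->runq[best],
				struct spu_context, rq);
}
#endif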

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
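/*
 * Worked example (illustrative, assuming HZ=1000): one SPU scheduler
 * tick is SPUSCHED_TICK = 10 jiffies, i.e. 10ms.  DEF_SPU_TIMESLICE
 * then evaluates to 100 * 1000 / (1000 * 10) = 10 ticks (100ms) and
 * MIN_SPU_TIMESLICE to max(0, 1) = 1 tick.  For a nice -20 thread
 * (prio 100), SCALE_PRIO(DEF_SPU_TIMESLICE * 4, 100) gives
 * 40 * (140 - 100) / 20 = 80 ticks (800ms); for a nice 19 thread
 * (prio 139) the scaled value rounds down to 0 and the result bottoms
 * out at MIN_SPU_TIMESLICE.
 */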
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;
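
	/*
	 * Example (illustrative, not from the original source): a SCHED_RR
	 * thread with rt_priority 50 has ->prio == 49, so ctx->prio becomes
	 * 49; a SCHED_OTHER thread at nice 5 has ->static_prio == 125, so
	 * ctx->prio becomes 125.
	 */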

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
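
/*
 * Example (illustrative): for a gang of five contexts whose reference
 * context sits in the middle of its affinity list, the loops above
 * assign aff_offset values of -2 and -1 to the contexts before the
 * reference, 0 to the reference context itself, and 1 and 2 to the
 * contexts after it.
 */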

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place. Although this code seeks to
		 * avoid having affinity gangs with a summed amount of
		 * contexts bigger than the amount of spus in the node,
		 * this may happen sporadically. In this case, available_spus
		 * becomes negative, which is harmless.
		 */
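		/*
		 * Worked example (illustrative): on a node with eight spus
		 * where one spu hosts the reference context of a gang of
		 * three, the loop below yields 8 - 3 = 5 available spus.
		 */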
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns 1 if the gang has a reference spu that the context must be
 * placed relative to, and 0 otherwise.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang. Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate, look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:	The SPU to unschedule from
 * @ctx:	The context currently scheduled on the SPU
 * @free_spu:	Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (ie, may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context,
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 * @prio:	cutoff priority; only contexts with a better (numerically
 *		lower) priority than this are considered
 * @node:	node on which the returned context must be allowed to run
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
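
/*
 * Example (illustrative): with contexts queued at priorities 49 and 125,
 * grab_runnable_context(120, node) takes the context at priority 49 and
 * leaves the one at 125 alone; find_first_bit() never looks at bits at
 * or above the cutoff priority.
 */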

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count the number of active contexts
 *
 * Return the number of contexts currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
}
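
/*
 * Illustrative numbers (assuming the generic <linux/sched/loadavg.h>
 * constants): FIXED_1 is 1 << 11 == 2048 and EXP_1 is 1884, so a single
 * busy context (active_tasks == 2048) pulls a zeroed one-minute average
 * up to roughly (0 * 1884 + 2048 * (2048 - 1884)) / 2048 == 164 on the
 * first LOAD_FREQ tick, which displays as about 0.08.
 */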

static void spusched_wake(struct timer_list *unused)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(struct timer_list *unused)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	curtime = ktime_get_ns();
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}
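
/*
 * Example (illustrative): a context that ran 5ms of user code and is
 * then preempted transitions SPU_UTIL_USER -> SPU_UTIL_SYSTEM; the 5ms
 * delta is charged to times[SPU_UTIL_USER] on both the context and the
 * physical spu, and the node's busy_spus count is decremented.
 */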

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
	return 0;
}
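
/*
 * Example output (illustrative, mirroring /proc/loadavg):
 *
 *	$ cat /proc/spu_loadavg
 *	0.08 0.02 0.01 1/3 1520
 *
 * i.e. the 1-, 5- and 15-minute SPU load averages, then the number of
 * active contexts over the total number of contexts, then the last pid
 * allocated in the reader's pid namespace.
 */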

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	timer_setup(&spusched_timer, spusched_wake, 0);
	timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}