/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
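/*
 * Worked example (illustrative only, assuming HZ=1000 and MAX_PRIO=140):
 * one SPU scheduler tick is SPUSCHED_TICK jiffies, i.e. 10ms.  Then
 * DEF_SPU_TIMESLICE = 100 * 1000 / (1000 * 10) = 10 ticks (100ms), and a
 * nice -20 thread (prio 100) gets
 * SCALE_PRIO(40, 100) = max(40 * (140 - 100) / 20, 1) = 80 ticks (800ms),
 * while a nice 19 thread (prio 139) bottoms out at MIN_SPU_TIMESLICE.
 */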
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node.  But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue.  The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}
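/*
 * A note on lock ordering as far as this file shows it (a sketch, not an
 * authoritative statement for all of spufs): ctx->state_mutex is taken
 * outermost, cbe_spu_info[node].list_mutex nests inside it, and
 * spu_prio->runq_lock is innermost.  Taking a second context's
 * state_mutex (as find_victim() must) is only done via mutex_trylock()
 * to avoid deadlocking two contexts against each other.
 */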
void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see that their "notify_active"
	 * flag is set, they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
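/*
 * Illustration (an assumed example, not taken from the code above): for
 * an affinity chain  A - B - REF - C - D  on the gang's aff_list,
 * aff_set_offsets() leaves A->aff_offset == -2, B->aff_offset == -1,
 * REF->aff_offset == 0, C->aff_offset == 1 and D->aff_offset == 2.
 * ctx_location() later translates these offsets into SPUs that many
 * schedulable steps away from the gang's reference SPU.
 */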
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
	int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place.  Although this code seeks to
		 * avoid having affinity gangs whose summed number of contexts
		 * is bigger than the number of spus in the node, this may
		 * happen sporadically.  In this case, available_spus becomes
		 * negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}
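/*
 * Continuing the illustration above (assuming every SPU on the way is
 * schedulable): ctx_location(ref, 2, node) walks forward and returns
 * the second SPU after the reference in the node's affinity list,
 * while ctx_location(ref, -1, node) walks backwards and returns the
 * SPU just before it.
 */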
/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns nonzero if the context belongs to an affinity gang, making
 * sure the gang's reference SPU (gang->aff_ref_spu) is set up first.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&gang->aff_sched_count) == 0)
		gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang.  Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}
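/*
 * Note (an observation from this file, not from the original comments):
 * the callback pointers above are torn down under spu->register_lock
 * with interrupts disabled, mirroring their setup in spu_bind_context(),
 * so a concurrent SPU interrupt never sees a half-bound context.
 */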
/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}
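/*
 * Search order of spu_get_idle() above, summarized: a context with SPU
 * affinity only ever gets the slot dictated by its gang's reference
 * SPU; everything else starts on the NUMA node of the submitting CPU
 * and then probes the remaining nodes round-robin for a free SPU.
 */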
/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}
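/*
 * A sketch of how the pieces above fit together (derived from this file,
 * not an authoritative spufs overview): spu_activate() tries
 * spu_get_idle() first, falls back to find_victim() for realtime
 * contexts, and binds via __spu_schedule(); the scheduler thread's tick
 * path instead pairs spu_unschedule() with spu_schedule() to rotate a
 * waiting context onto the freed SPU.
 */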
/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu: The SPU to unschedule from
 * @ctx: The context currently scheduled on the SPU
 * @free_spu: Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu.  If @free_spu is non-zero, the
 * SPU is made available for other contexts (i.e., may be returned by
 * spu_get_idle).  If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}
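/*
 * Usage note: since spu_prio_wait() drops and retakes ctx->state_mutex,
 * spu_activate() must be entered with ctx->state_mutex held; the
 * typical caller is the spu_run path in run.c.
 */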
/**
 * grab_runnable_context - try to find a runnable context
 * @prio: only consider contexts with a priority value below @prio
 * @node: node on which the context would run; only contexts allowed
 *	there are considered
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}
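/*
 * Note on the tick above: passing ctx->prio + 1 to
 * grab_runnable_context() scans priority bits 0..ctx->prio, so a
 * running context is only ever displaced here by a waiter of equal or
 * better priority; a strictly worse waiter keeps waiting.
 */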
/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
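/*
 * Worked example of the fixed-point display (assuming the kernel's
 * FSHIFT of 11, so FIXED_1 == 2048): an avenrun value of 3072 prints
 * as "1.50", since LOAD_INT(3072) == 1 and
 * LOAD_FRAC(3072) == (1024 * 100) >> 11 == 50.  The FIXED_1/200 added
 * below rounds to the nearest hundredth.
 */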
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

out_stop_kthread:
	kthread_stop(spusched_task);
out_free_spu_prio:
	kfree(spu_prio);
out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}