/*
 * SN Platform GRU Driver
 *
 *            DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should "shift" be used?? Depends on HT cpu numbering
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit			       ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
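/*
 * Worked example (illustrative, not part of the driver): suppose that
 * after a wrap two loaded contexts still hold asids 0x40 and 0x80. The
 * first assignable block is then [MIN_ASID .. 0x40). When "next"
 * reaches 0x40, gru_reset_asid_limit() skips over the in-use asid and
 * the next block becomes (0x40 .. 0x80), and so on until MAX_ASID
 * forces another TLB flush, a wrap, and a new asid generation.
 */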
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
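/*
 * Example (illustrative): with *p == 0b10110 and n == 2,
 * reserve_resources() clears bits 1 and 2 in *p (leaving 0b10000),
 * returns 0b00110, and, if idx is non-NULL, stores {1, 2} in the
 * idx array.
 */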
unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not.
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}
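/*
 * Example (illustrative): if a process has its GSEG loaded on chiplets
 * gid 0 and gid 2, bits 0 and 2 are set in gms->ms_asidmap and each
 * gru_mm_tracker records the asid and context numbers in use on that
 * chiplet, so a later TLB flush for the mm can be directed at exactly
 * those chiplets.
 */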
/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
							    *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_user_options = options;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gts->ts_gms = gru_register_mmu_notifier();
		if (!gts->ts_gms)
			goto err;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that a race may allow
 * another thread to create a gts for this tsid first; if so, use the
 * existing one.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
			    vdata->vd_user_options, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}
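/*
 * Usage sketch (illustrative, not a required calling convention):
 *
 *	gts = gru_find_thread_state(vma, tsid);
 *	if (!gts)
 *		gts = gru_alloc_thread_state(vma, tsid);
 *
 * gru_alloc_thread_state() tolerates losing the race: if another
 * thread inserted a gts for this tsid first, the fresh allocation is
 * dropped and the existing gts is returned.
 */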
/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement; not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}
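/*
 * Save-area layout (illustrative): gru_unload_context_data() writes,
 * for each allocated CBR i, the CB handle followed by the matching CBE
 * handle, then appends hweight64(dsrmap) * GRU_DSR_AU_BYTES of DSR
 * data. gru_load_context_data() consumes the same layout when
 * data_valid is set, and zeroes the resources instead when it is not.
 */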
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map,
			      gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 *	- force a delayed context unload by clearing the CCH asids. This
 *	  forces TLB misses for new GRU instructions. The context is unloaded
 *	  when the next TLB miss occurs.
 */
int gru_update_cch(struct gru_thread_state *gts, int force_unload)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (!force_unload) {
			for (i = 0; i < 8; i++)
				cch->sizeavail[i] = gts->ts_sizeavail;
			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
			cch->tlb_int_select = gru_cpu_fault_map_id();
			cch->tfm_fault_bit_enable =
			    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
			     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}
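/*
 * Usage sketch (illustrative): gru_update_cch(gts, 1) forces a delayed
 * unload -- the CCH asids are cleared so every new GRU instruction
 * takes a TLB miss, and the fault path, seeing ts_force_unload set,
 * performs the actual unload. gru_update_cch(gts, 0) is the
 * retarget/sizeavail-update path used by gru_retarget_intr() below.
 */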
/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, 0);
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts, int blade_id)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;

	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
			break;
		spin_lock(&gru->gs_lock);
		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
			if (flag && gru == gru0 && ctxnum == ctxnum0)
				break;
			ngts = gru->gs_gts[ctxnum];
			/*
			 * We are grabbing locks out of order, so trylock is
			 * needed. GTSs are usually not locked, so the odds of
			 * success are high. If trylock fails, try to steal a
			 * different GSEG.
			 */
			if (ngts && is_gts_stealable(ngts, blade))
				break;
			ngts = NULL;
			flag = 1;
		}
		spin_unlock(&gru->gs_lock);
		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
			break;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}
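/*
 * Victim scan in gru_steal_context() (illustrative): the search
 * resumes round-robin from the blade's (bs_lru_gru, bs_lru_ctxnum)
 * cursor -- the remaining contexts on that chiplet first, then each
 * successive chiplet on the blade, wrapping until the starting
 * (gru0, ctxnum0) position is seen again. The cursor is written back
 * so the next steal starts where this one left off.
 */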
/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
						int blade)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;


again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	/* Pick the GRU with the fewest active contexts that still has room */
	for_each_gru_on_blade(grux, blade, i) {
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		/* Resources were checked without the lock; recheck under it */
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum =
		    find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
		BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 *	Note: GRU segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;
	int blade_id;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();
	blade_id = uv_numa_blade_id();

	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != blade_id) {
			STAT(migrated_nopfn_unload);
			gru_unload_context(gts, 1);
		} else {
			if (gru_retarget_intr(gts))
				STAT(migrated_nopfn_retarget);
		}
	}

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts, blade_id)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			blade_id = uv_numa_blade_id();
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts, blade_id);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}