/*
 * SN Platform GRU Driver
 *
 *            DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should "shift" be used?? Depends on HT cpu numbering
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
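/*
 * Illustrative walk-through of the block-allocation scheme above (the
 * numbers are hypothetical, not the real MIN_ASID/MAX_ASID values):
 *
 *	- asids 0x10 and 0x30 are still in use by loaded contexts
 *	- gru_reset_asid_limit() is entered with asid == 0x8
 *	- no loaded context is using 0x8, so gs_asid is set to 0x8 and
 *	  gs_asid_limit to 0x10 (the lowest in-use asid above 0x8)
 *	- gru_assign_asid() then hands out 0x8+ASID_INC, ... without
 *	  rescanning; once the limit 0x10 is reached it calls
 *	  gru_reset_asid_limit() again to find the next free block
 *	  (up to 0x30).
 */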
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}
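/*
 * Example of the bitmap bookkeeping above (illustrative values): if
 * gs_cbr_map is 0xf (allocation units 0-3 free) and a context needs two
 * CBR AUs, gru_reserve_cb_resources() clears bits 0 and 1 in gs_cbr_map
 * (leaving 0xc), returns 0x3 as ts_cbr_map, and records AU numbers 0 and 1
 * in ts_cbr_idx[].  free_gru_resources() simply ORs ts_cbr_map back into
 * gs_cbr_map when the context is released.
 */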
/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
					*vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}
/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_user_options = options;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gts->ts_gms = gru_register_mmu_notifier();
		if (!gts->ts_gms)
			goto err;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that a race may allow
 * another thread to create the gts first.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
			    vdata->vd_user_options, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}
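/*
 * Typical gts lifetime, summarizing the functions above and below
 * (informational only):
 *
 *	gru_alloc_thread_state()  - create the gts (refcnt 1) and link it on
 *				    the vma; a racing loser is released via
 *				    gts_drop().
 *	gru_assign_gru_context()  - reserve CBR/DSR resources and a context
 *				    number; takes an extra gts reference.
 *	gru_load_context()	  - copy the saved state into the GRU.
 *	gru_unload_context()	  - save state back to memory and call
 *				    gru_free_gru_context(), which returns the
 *				    GRU resources and drops the extra
 *				    reference.
 */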
/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}
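/*
 * Layout of the in-memory save area (gts->ts_gdata) used by
 * gru_load_context_data()/gru_unload_context_data() above: for each
 * allocated CBR, the CB handle followed by its CBE handle
 * (GRU_HANDLE_BYTES each), then the data segment contents
 * (hweight64(dsrmap) * GRU_DSR_AU_BYTES bytes).
 */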
/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map,
			      gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 *	- force a delayed context unload by clearing the CCH asids. This
 *	  forces TLB misses for new GRU instructions. The context is unloaded
 *	  when the next TLB miss occurs.
 */
int gru_update_cch(struct gru_thread_state *gts, int force_unload)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (!force_unload) {
			for (i = 0; i < 8; i++)
				cch->sizeavail[i] = gts->ts_sizeavail;
			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
			cch->tlb_int_select = gru_cpu_fault_map_id();
			cch->tfm_fault_bit_enable =
			    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
			     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}
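/*
 * Usage note for gru_update_cch(): gru_retarget_intr() below passes
 * force_unload == 0 to move the TLB interrupt target to the current cpu's
 * fault map; a nonzero force_unload instead clears the CCH asids so the
 * context is unloaded when its next TLB miss occurs.
 */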
/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, 0);
}

/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts, int blade_id)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;

	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
			break;
		spin_lock(&gru->gs_lock);
		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
			if (flag && gru == gru0 && ctxnum == ctxnum0)
				break;
			ngts = gru->gs_gts[ctxnum];
			/*
			 * We are grabbing locks out of order, so trylock is
			 * needed. GTSs are usually not locked, so the odds of
			 * success are high. If trylock fails, try to steal a
			 * different GSEG.
			 */
			if (ngts && is_gts_stealable(ngts, blade))
				break;
			ngts = NULL;
			flag = 1;
		}
		spin_unlock(&gru->gs_lock);
		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
			break;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}
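/*
 * Note on steal throttling: ts_steal_jiffies records when a context was
 * last stolen from a gts.  gru_fault() below only calls gru_steal_context()
 * on behalf of a gts if more than GRU_STEAL_DELAY jiffies have passed since
 * that gts itself was last victimized, which keeps contexts from bouncing
 * between competing threads.
 */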
/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
						int blade)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;

again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade, i) {
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum =
		    find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
		BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmaped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;
	int blade_id;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();
	blade_id = uv_numa_blade_id();

	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != blade_id) {
			STAT(migrated_nopfn_unload);
			gru_unload_context(gts, 1);
		} else {
			if (gru_retarget_intr(gts))
				STAT(migrated_nopfn_retarget);
		}
	}

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts, blade_id)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			blade_id = uv_numa_blade_id();
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts, blade_id);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}