/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should "shift" be used?? Depends on HT cpu numbering
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit		^-MAX_ASID
 *
 *  All asid manipulation & context loading/unloading is protected by the
 *  gs_lock.
 */
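
/*
 * Illustrative walk-through of the scheme above. All values are
 * hypothetical and chosen only to show the mechanics; the real
 * MIN_ASID/MAX_ASID/ASID_INC come from grutables.h.
 *
 *   Suppose a wrap just occurred, so the candidate asid is MIN_ASID and two
 *   loaded contexts still hold asids A1 and A2 with MIN_ASID < A1 < A2.
 *   gru_reset_asid_limit() scans the loaded contexts: neither in-use asid
 *   equals the candidate, and the smallest in-use asid above the candidate
 *   is A1, so "limit" becomes A1. Asids in [candidate, A1) are then handed
 *   out sequentially by gru_assign_asid(). When the candidate reaches A1,
 *   gru_reset_asid_limit() runs again starting at A1: if that context still
 *   holds A1, the candidate is bumped past it and the next limit becomes A2.
 */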

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
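
/*
 * Hypothetical example of reserve_resources(), for illustration only:
 * with *p = 0xf0 (bits 4-7 set), n = 2 and idx != NULL, the first pass
 * finds bit 4 and the second finds bit 5, so on return *p = 0xc0, the
 * returned mask is 0x30 and idx[] holds {4, 5}. The caller thus receives
 * both a bitmask and the list of resource numbers it now owns.
 */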

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
					*vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gts->ts_gms = gru_register_mmu_notifier();
		if (!gts->ts_gms)
			goto err;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to race to create a gts.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
			    vdata->vd_user_options, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}
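
/*
 * Note on the context save area (gts->ts_gdata) used by the two functions
 * below: judging from the copy loops, each allocated CBR contributes its CB
 * handle followed by its CBE handle (GRU_HANDLE_BYTES each), and the DSR
 * contents (hweight64(dsrmap) * GRU_DSR_AU_BYTES bytes) are appended after
 * all CB/CBE pairs. This is a descriptive note inferred from the code, not
 * a hardware-defined format.
 */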

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
				GRU_CACHE_LINE_BYTES);
		}
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);
}

/*
 * Update fields in an active CCH:
 * 	- retarget interrupts on local blade
 * 	- update sizeavail mask
 * 	- force a delayed context unload by clearing the CCH asids. This
 * 	  forces TLB misses for new GRU instructions. The context is unloaded
 * 	  when the next TLB miss occurs.
 */
int gru_update_cch(struct gru_thread_state *gts, int force_unload)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (!force_unload) {
			for (i = 0; i < 8; i++)
				cch->sizeavail[i] = gts->ts_sizeavail;
			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
			cch->tlb_int_select = gru_cpu_fault_map_id();
			cch->tfm_fault_bit_enable =
			    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
			     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 * 	- task's GRU context is loaded into a GRU
 * 	- task is using interrupt notification for TLB faults
 * 	- task has migrated to a different cpu on the same blade where
 * 	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, 0);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;
	int blade_id, chiplet_id;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	if (gru->gs_blade_id != blade_id ||
	    (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from another process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so trylock is
				 * needed. GTSs are usually not locked, so the odds of
				 * success are high. If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * 	Note: GRU segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}