/*
 * SN Platform GRU Driver
 *
 *            DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	int cpu = smp_processor_id();
	int id, core;

	core = uv_cpu_core_number(cpu);
	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
	return id;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise)
 *  still in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next        ^-limit                                 ^-MAX_ASID
 *
 *  All asid manipulation & context loading/unloading is protected by the
 *  gs_lock.
 */
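/*
 * Illustrative example of the scheme above (the values are made up, not
 * the real MIN_ASID/MAX_ASID): suppose asids 0x40 and 0x90 are still in
 * use when the allocator wraps.  gru_wrap_asid() restarts at MIN_ASID and
 * bumps gs_asid_gen; gru_reset_asid_limit() then flushes the TLB, scans
 * the loaded contexts and sets gs_asid_limit to 0x40, so asids are handed
 * out sequentially until 0x40 is reached.  At that point the scan resumes
 * past 0x40, finds the next in-use value (0x90) as the new limit, and so
 * on until MAX_ASID forces another wrap.
 */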
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}
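/*
 * Example of the allocation-map bookkeeping above (illustrative values):
 * if gs_cbr_map == 0b101100 (allocation units 2, 3 and 5 free), reserving
 * two CBR allocation units clears bits 2 and 3 and returns 0b001100,
 * which the caller stores in ts_cbr_map.  free_gru_resources() later just
 * ORs that mask back into gs_cbr_map when the context is unloaded.
 */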
/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		if (gts->ts_gms)
			gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
					*vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}
/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	struct gru_mm_struct *gms;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return ERR_PTR(-ENOMEM);

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_tlb_preload_count = tlb_preload_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gms = gru_register_mmu_notifier();
		if (IS_ERR(gms))
			goto err;
		gts->ts_gms = gms;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return ERR_CAST(gms);
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	STAT(vdata_alloc);
	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that a race may allow
 * another thread to create the gts first; if so, the duplicate is dropped.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
			    vdata->vd_dsr_au_count,
			    vdata->vd_tlb_preload_count,
			    vdata->vd_user_options, tsid);
	if (IS_ERR(gts))
		return gts;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}
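/*
 * Informal sketch of the allocation made by gru_alloc_gts() above, as
 * implied by its size calculation and by the context save/restore code
 * later in this file:
 *
 *	+-----------------------------+  <- gts (header, zeroed)
 *	| struct gru_thread_state     |
 *	+-----------------------------+  <- ts_gdata save area
 *	| CB/CBE handle save area     |  CBR_BYTES(cbr_au_count)
 *	+-----------------------------+
 *	| DSR data save area          |  DSR_BYTES(dsr_au_count)
 *	+-----------------------------+
 *
 * The save area is not initialized here; it becomes meaningful only after
 * gru_unload_context() saves a context and sets ts_data_valid.
 */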
/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		/* Flush CBE to hide race in context restart */
		mb();
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}
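/*
 * Save-area format used by the copy loops above and below (as inferred
 * from the code): for each allocated CBR, the CB handle and its CBE are
 * stored back to back, followed by the contents of all allocated DSR
 * allocation units.  When ts_data_valid is clear (e.g. the first time a
 * context is loaded), gru_load_context_data() zeroes the hardware state
 * instead of restoring it from the save area.
 */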
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

	/* CBEs may not be coherent. Flush them from cache */
	for_each_cbr_in_allocation_map(i, &cbrmap, scr)
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
	mb();		/* Let the CL flush complete */

	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
		gts, gts->ts_cbr_map, gts->ts_dsr_map);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
		cch->tlb_int_select = 0;	/* For now, ints go to cpu 0 */
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
		gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
		(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR),
		gts->ts_tlb_int_select);
}
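/*
 * Summary of how the user's fault-handling option maps onto the CCH
 * fields programmed above:
 *
 *	ts_user_options		tfm_fault_bit_enable	tlb_int_enable
 *	GRU_OPT_MISS_FMM_POLL		1			0
 *	GRU_OPT_MISS_FMM_INTR		1			1
 *	anything else			0			0
 *
 * When TLB interrupts are enabled, tlb_int_select is set to the fault map
 * id of the cpu that loads the context; gru_retarget_intr() below updates
 * it if the task later migrates to another cpu on the same blade.
 */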
/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;
	int blade_id, chiplet_id;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	if (gru->gs_blade_id != blade_id ||
	    (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])
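/*
 * Informal sketch of how the cursor macros above behave (assuming the
 * usual two GRU chiplets per blade): next_ctxnum() advances the starting
 * context number and wraps it back to 0 once it reaches GRU_NUM_CCH - 2,
 * and gru_steal_context() below calls next_gru() to move to the other
 * chiplet whenever the number wraps.  Successive steal attempts therefore
 * sweep the CCH slots of the blade's chiplets in round-robin fashion.
 */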
static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so
				 * trylock is needed. GTSs are usually not
				 * locked, so the odds of success are high.
				 * If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}
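/*
 * Note: find_first_zero_bit() above is expected to always find a free
 * slot.  The caller (gru_assign_gru_context) holds gs_lock and has just
 * re-checked via check_gru_resources() that fewer than GRU_NUM_CCH
 * contexts are active, so at least one bit in gs_context_map is clear.
 */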
/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}