// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *		DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
	return uv_blade_processor_id() % GRU_NUM_TFM;
#else
	int cpu = smp_processor_id();
	int id, core;

	core = uv_cpu_core_number(cpu);
	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
	return id;
#endif
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use (percentage-wise, not many).
 *  Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit		^-MAX_ASID
 *
 *  All asid manipulation & context loading/unloading is protected by the
 *  gs_lock.
 */
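
/*
 * Illustrative walk-through (editor's sketch; the numeric value below is
 * hypothetical and only assumes ASID_INC is small relative to MAX_ASID):
 *
 *	- asids are handed out in steps of ASID_INC until gs_asid reaches
 *	  gs_asid_limit;
 *	- gru_reset_asid_limit() then rescans the loaded contexts. If, say,
 *	  the lowest asid still in use above the current position is 0x40,
 *	  gs_asid_limit becomes 0x40 and assignment continues below it;
 *	- once MAX_ASID is reached, gru_wrap_asid() increments gs_asid_gen
 *	  and restarts at MIN_ASID, so a stale mt_asid is detected by the
 *	  generation check in gru_load_mm_tracker() and replaced.
 */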

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       signed char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       signed char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       signed char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}
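
/*
 * Worked example for the allocators above (editor's illustration; the
 * bitmap values are hypothetical): with gs_cbr_map == 0xf0 and
 * cbr_au_count == 2, reserve_resources() clears bits 4 and 5 in
 * gs_cbr_map, returns 0x30 as the context's allocation map, and records
 * the indices {4, 5} in the caller-supplied idx array (ts_cbr_idx).
 * free_gru_resources() releases everything by simply ORing the saved
 * per-context maps back into gs_cbr_map/gs_dsr_map.
 */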

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}
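
/*
 * Locking note for the two trackers above: both the load and unload paths
 * nest gru->gs_asid_lock inside gms->ms_asid_lock, so per-GRU asid state
 * is only changed while the mm-wide tracker is held. mt_ctxbitmap records
 * which context numbers on this chiplet currently use the asid; once it
 * drains to zero, a later load assigns a fresh asid if the asid
 * generation has moved on (see gru_load_mm_tracker() above).
 */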

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && refcount_dec_and_test(&gts->ts_refcnt)) {
		if (gts->ts_gms)
			gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
			    *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	struct gru_mm_struct *gms;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return ERR_PTR(-ENOMEM);

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	refcount_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_tlb_preload_count = tlb_preload_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gms = gru_register_mmu_notifier();
		if (IS_ERR(gms))
			goto err;
		gts->ts_gms = gms;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return ERR_CAST(gms);
}
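
/*
 * Note on the allocation above: the gts is sized to hold the header plus
 * the CBR/DSR save area (DSR_BYTES() + CBR_BYTES()), but only the header
 * is zeroed. The save area does not need initialization here: it is
 * either filled by gru_unload_context_data() or ignored until
 * ts_data_valid is set (see gru_load_context_data() below).
 */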

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	STAT(vdata_alloc);
	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to create a gts first; in that case the duplicate is
 * dropped and the existing gts is returned.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
			    vdata->vd_dsr_au_count,
			    vdata->vd_tlb_preload_count,
			    vdata->vd_user_options, tsid);
	if (IS_ERR(gts))
		return gts;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
				GRU_CACHE_LINE_BYTES);
		}
		/* Flush CBE to hide race in context restart */
		mb();
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}
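
/*
 * Layout of the ts_gdata save buffer used by gru_load_context_data()
 * above and gru_unload_context_data() below: for each CBR in the
 * allocation map the CB handle is saved, immediately followed by its CBE,
 * each GRU_HANDLE_BYTES long; the data segment contents
 * (hweight64(dsrmap) * GRU_DSR_AU_BYTES bytes) follow at the end.
 */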

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

	/* CBEs may not be coherent. Flush them from cache */
	for_each_cbr_in_allocation_map(i, &cbrmap, scr)
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
	mb();		/* Let the CL flush complete */

	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
		gts, gts->ts_cbr_map, gts->ts_dsr_map);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
		cch->tlb_int_select = 0;	/* For now, ints go to cpu 0 */
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
		gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
		(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
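
/*
 * Notes on the CCH programming in gru_load_context() above (editor's
 * summary): user contexts that requested GRU_OPT_MISS_FMM_INTR get their
 * TLB-miss interrupts steered to the fault map of the cpu doing the load
 * (gru_cpu_fault_map_id()), while kernel contexts route interrupts to
 * cpu 0 for now. For user contexts, eight consecutive asids starting at
 * the value returned by gru_load_mm_tracker() are programmed into the
 * CCH, each with the same sizeavail mask.
 */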

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Check if a GRU context is allowed to use a specific chiplet. By default
 * a context is assigned to any blade-local chiplet. However, users can
 * override this.
 *	Returns 1 if assignment allowed, 0 otherwise
 */
static int gru_check_chiplet_assignment(struct gru_state *gru,
					struct gru_thread_state *gts)
{
	int blade_id;
	int chiplet_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	return gru->gs_blade_id == blade_id &&
		(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
int gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;
	int ret = 0;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	/*
	 * If gru or gts->ts_tgid_owner isn't initialized properly, return
	 * success to indicate that the caller does not need to unload the
	 * gru context. The caller is responsible for their inspection and
	 * reinitialization if needed.
	 */
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return ret;

	if (!gru_check_chiplet_assignment(gru, gts)) {
		STAT(check_context_unload);
		ret = -EINVAL;
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}

	return ret;
}
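
/*
 * Typical caller pattern for the placement check above (editor's sketch,
 * mirroring gru_fault() at the end of this file): a non-zero return means
 * the context sits on the wrong blade/chiplet and must be unloaded.
 *
 *	if (gru_check_context_placement(gts)) {
 *		mutex_unlock(&gts->ts_ctxlock);
 *		gru_unload_context(gts, 1);
 *		return VM_FAULT_NOPAGE;
 *	}
 */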

/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) <  GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (gru_check_chiplet_assignment(gru, gts)) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so trylock is
				 * needed. GTSs are usually not locked, so the odds of
				 * success are high. If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}
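
/*
 * Steal policy notes (editor's summary of gru_steal_context() above): the
 * scan rotates round-robin through (gru, ctxnum) pairs starting from the
 * per-blade LRU position, and stops early if a chiplet already has enough
 * free resources. Victim contexts are locked with trylock only, to avoid
 * lock-order inversion; kernel contexts are unloaded without saving state
 * (savestate == 0), user contexts with state saved.
 */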

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (!gru_check_chiplet_assignment(grux, gts))
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		refcount_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
vm_fault_t gru_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;
	unsigned long expires;

	vaddr = vmf->address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);

	if (gru_check_context_placement(gts)) {
		mutex_unlock(&gts->ts_ctxlock);
		gru_unload_context(gts, 1);
		return VM_FAULT_NOPAGE;
	}

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY;
			if (time_before(expires, jiffies))
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}