// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"

struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	/*
	 * To avoid severe over-allocation when dealing with min_page_size
	 * restrictions, we override that behaviour here by allowing an object
	 * size and page layout which can be smaller. In practice this should be
	 * totally fine, since GTT paging structures are not typically inserted
	 * into the GTT.
	 *
	 * Note that we also hit this path for the scratch page, and for this
	 * case it might need to be 64K, but that should work fine here since we
	 * used the passed in size for the page size, which should ensure it
	 * also has the same alignment.
	 */
	obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
						    vm->lmem_pt_obj_flags);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
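	 *
	 * A caller typically takes that single lock for the whole vm via
	 * i915_vm_lock_objects() below, roughly (illustrative sketch only;
	 * ww backoff and error handling elided):
	 *
	 *	i915_gem_ww_ctx_init(&ww, false);
	 *	err = i915_vm_lock_objects(vm, &ww);
	 *	... update the paging structures ...
	 *	i915_gem_ww_ctx_fini(&ww);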
	 */
	if (!IS_ERR(obj)) {
		obj->base.resv = i915_vm_resv_get(vm);
		obj->shares_resv_from = vm;
	}

	return obj;
}

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}

void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
		return;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));

	mutex_unlock(&vm->mutex);
}

/* Lock the vm into the current ww; if we lock one, we lock them all */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->_resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/* We borrowed the scratch page from the ggtt, so take the top-level object */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}

void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
	mutex_destroy(&vm->mutex);
}

/**
 * i915_vm_resv_release - Final struct i915_address_space destructor
 * @kref: Pointer to the &i915_address_space.resv_ref member.
 *
 * This function is called when the last lock sharer no longer shares the
 * &i915_address_space._resv lock.
 */
void i915_vm_resv_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, typeof(*vm), resv_ref);

	dma_resv_fini(&vm->_resv);
	kfree(vm);
}

static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, release_work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);

	i915_vm_resv_put(vm);
}

void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_work(vm->i915->wq, &vm->release_work);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);

	/*
	 * Special case for GGTT that has already done an early
	 * kref_init here.
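	 * For any other address space the refcount is still expected to be
	 * zero at this point, so initialise it now. The reference is dropped
	 * again via i915_vm_resv_put() in __i915_vm_release(), and the last
	 * put frees the vm in i915_vm_resv_release().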
	 */
	if (!kref_read(&vm->resv_ref))
		kref_init(&vm->resv_ref);

	INIT_WORK(&vm->release_work, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);

	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	} else {
		/*
		 * The CHV + BXT VTD workaround uses stop_machine(),
		 * which is allowed to allocate memory. This means &vm->mutex
		 * is the outer lock, and in theory we can allocate memory inside
		 * it through stop_machine().
		 *
		 * Add the annotation for this; we use trylock in the shrinker.
		 */
		mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
		might_alloc(GFP_KERNEL);
		mutex_release(&vm->mutex.dep_map, _THIS_IP_);
	}
	dma_resv_init(&vm->_resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
}

void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}

dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}

struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	clflush_cache_range(vaddr, PAGE_SIZE);
}

static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
	drm_clflush_virt_range(vaddr, scratch->base.size);
}

int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64K scratch page
	 * for all.
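	 *
	 * The loop below tries the 64K scratch first and, if the allocation,
	 * mapping or alignment check fails, falls back to a 4K page, except
	 * on platforms that strictly require 64K GTT pages (HAS_64K_PAGES),
	 * where we fail the allocation instead.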
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_scratch_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		/*
		 * If we need 64K minimum GTT pages for device local-memory,
		 * like on XEHPSDV, then we need to fail the allocation here,
		 * otherwise we can't safely support the insertion of
		 * local-memory pages for this vm, since the HW expects the
		 * correct physical alignment and size when the page-table is
		 * operating in 64K GTT mode, which includes any scratch PTEs,
		 * since userspace can still touch them.
		 */
		if (HAS_64K_PAGES(vm->i915))
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}

void free_scratch(struct i915_address_space *vm)
{
	int i;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}

void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for GTT-related workarounds. It is called on driver
	 * load and after a GPU reset, so you can place workarounds here even if
	 * they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    GRAPHICS_VER(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GRAPHICS_VER(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec, if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}

static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}

static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (GRAPHICS_VER(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 8);

	if (GRAPHICS_VER(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (GRAPHICS_VER(i915) >= 11)
		icl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif