// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "intel_migrate.h"
#include "intel_ring.h"

struct insert_pte_data {
	u64 offset;
};

#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */

#define GET_CCS_BYTES(i915, size)	(HAS_FLAT_CCS(i915) ? \
					 DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)

static bool engine_supports_migration(struct intel_engine_cs *engine)
{
	if (!engine)
		return false;

	/*
	 * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
	 * the ability to write the PTE using inline data (MI_STORE_DATA)
	 * and of course the ability to do the block transfer (blits).
	 */
	GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);

	return true;
}

static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
				struct i915_page_table *pt,
				void *data)
{
	struct insert_pte_data *d = data;

	/*
	 * Insert a dummy PTE into every PT that will map to LMEM to ensure
	 * we have a correctly setup PDE structure for later use.
	 */
	vm->insert_page(vm, 0, d->offset, I915_CACHE_NONE, PTE_LM);
	GEM_BUG_ON(!pt->is_compact);
	d->offset += SZ_2M;
}

static void xehpsdv_insert_pte(struct i915_address_space *vm,
			       struct i915_page_table *pt,
			       void *data)
{
	struct insert_pte_data *d = data;

	/*
	 * We are playing tricks here, since the actual pt, from the hw
	 * pov, is only 256 bytes with 32 entries, or 4096 bytes with 512
	 * entries, but we are still guaranteed that the physical
	 * alignment is 64K underneath for the pt, and we are careful
	 * not to access the space in the void.
	 */
	vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, PTE_LM);
	d->offset += SZ_64K;
}

static void insert_pte(struct i915_address_space *vm,
		       struct i915_page_table *pt,
		       void *data)
{
	struct insert_pte_data *d = data;

	vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
			i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
	d->offset += PAGE_SIZE;
}

static struct i915_address_space *migrate_vm(struct intel_gt *gt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *vm;
	int err;
	int i;

	/*
	 * We construct a very special VM for use by all migration contexts;
	 * it is kept pinned so that it can be used at any time. As we need
	 * to pre-allocate the page directories for the migration VM, this
	 * limits us to only using a small number of prepared vma.
	 *
	 * To be able to pipeline and reschedule migration operations while
	 * avoiding unnecessary contention on the vm itself, the PTE updates
	 * are inline with the blits. All the blits use the same fixed
	 * addresses, with the backing store redirection being updated on the
	 * fly. Only 2 implicit vma are used for all migration operations.
	 *
	 * We lay the ppGTT out as:
	 *
	 *	[0, CHUNK_SZ) -> first object
	 *	[CHUNK_SZ, 2 * CHUNK_SZ) -> second object
	 *	[2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
	 *
	 * By exposing the dma addresses of the page directories themselves
	 * within the ppGTT, we are then able to rewrite the PTE prior to use.
	 * But the PTE update and subsequent migration operation must be atomic,
	 * i.e. within the same non-preemptible window so that we do not switch
	 * to another migration context that overwrites the PTE.
	 *
	 * This changes quite a bit on platforms with HAS_64K_PAGES support,
	 * where we instead have three windows, each CHUNK_SZ in size. The
	 * first is reserved for mapping system-memory, and that just uses the
	 * 512 entry layout using 4K GTT pages. The other two windows just map
	 * lmem pages and must use the new compact 32 entry layout using 64K GTT
	 * pages, which ensures we can address any lmem object that the user
	 * throws at us. We then also use xehpsdv_toggle_pdes() as a way of
	 * just toggling the PDE bit (GEN12_PDE_64K) for us, to enable the
	 * compact layout for each of the page-tables that fall within the
	 * [CHUNK_SZ, 3 * CHUNK_SZ) range.
	 *
	 * We lay the ppGTT out as:
	 *
	 *	[0, CHUNK_SZ) -> first window/object, maps smem
	 *	[CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
	 *	[2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
	 *
	 * For the PTE window it's also quite different, since each PTE must
	 * point to some 64K page, one for each PT (since it's in lmem), and yet
	 * each is only <= 4096 bytes, but since the unused space within that PTE
	 * range is never touched, this should be fine.
	 *
	 * So basically each PT now needs 64K of virtual memory, instead of 4K,
	 * which looks like:
	 *
	 *	[3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
	 */

	vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
	if (IS_ERR(vm))
		return ERR_CAST(vm);

	if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
		err = -ENODEV;
		goto err_vm;
	}

	if (HAS_64K_PAGES(gt->i915))
		stash.pt_sz = I915_GTT_PAGE_SIZE_64K;

	/*
	 * Each engine instance is assigned its own chunk in the VM, so
	 * that we can run multiple instances concurrently.
	 */
	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		struct intel_engine_cs *engine;
		u64 base = (u64)i << 32;
		struct insert_pte_data d = {};
		struct i915_gem_ww_ctx ww;
		u64 sz;

		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (!engine_supports_migration(engine))
			continue;

		/*
		 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
		 * 4x2 page directories for source/destination.
		 */
		if (HAS_64K_PAGES(gt->i915))
			sz = 3 * CHUNK_SZ;
		else
			sz = 2 * CHUNK_SZ;
		d.offset = base + sz;

		/*
		 * We need another page directory setup so that we can write
		 * the 8x512 PTE in each chunk.
		 */
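		/*
		 * For example: without 64K pages, sz = 2 * CHUNK_SZ = 16M of
		 * VA here, i.e. (16M >> 12) = 4096 PTEs at 8 bytes each, so
		 * 32K of extra VA for the PTE window. With 64K pages,
		 * sz = 3 * CHUNK_SZ = 24M, i.e. 24M / 2M = 12 page tables,
		 * each given a 64K slot, so 768K of extra VA.
		 */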
		if (HAS_64K_PAGES(gt->i915))
			sz += (sz / SZ_2M) * SZ_64K;
		else
			sz += (sz >> 12) * sizeof(u64);

		err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
		if (err)
			goto err_vm;

		for_i915_gem_ww(&ww, err, true) {
			err = i915_vm_lock_objects(&vm->vm, &ww);
			if (err)
				continue;
			err = i915_vm_map_pt_stash(&vm->vm, &stash);
			if (err)
				continue;

			vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
		}
		i915_vm_free_pt_stash(&vm->vm, &stash);
		if (err)
			goto err_vm;

		/* Now allow the GPU to rewrite the PTE via its own ppGTT */
		if (HAS_64K_PAGES(gt->i915)) {
			vm->vm.foreach(&vm->vm, base, d.offset - base,
				       xehpsdv_insert_pte, &d);
			d.offset = base + CHUNK_SZ;
			vm->vm.foreach(&vm->vm,
				       d.offset,
				       2 * CHUNK_SZ,
				       xehpsdv_toggle_pdes, &d);
		} else {
			vm->vm.foreach(&vm->vm, base, d.offset - base,
				       insert_pte, &d);
		}
	}

	return &vm->vm;

err_vm:
	i915_vm_put(&vm->vm);
	return ERR_PTR(err);
}

static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	int i;

	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (engine_supports_migration(engine))
			return engine;
	}

	return NULL;
}

static struct intel_context *pinned_context(struct intel_gt *gt)
{
	static struct lock_class_key key;
	struct intel_engine_cs *engine;
	struct i915_address_space *vm;
	struct intel_context *ce;

	engine = first_copy_engine(gt);
	if (!engine)
		return ERR_PTR(-ENODEV);

	vm = migrate_vm(gt);
	if (IS_ERR(vm))
		return ERR_CAST(vm);

	ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
						I915_GEM_HWS_MIGRATE,
						&key, "migrate");
	i915_vm_put(vm);
	return ce;
}

int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
{
	struct intel_context *ce;

	memset(m, 0, sizeof(*m));

	ce = pinned_context(gt);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	m->context = ce;
	return 0;
}

static int random_index(unsigned int max)
{
	return upper_32_bits(mul_u32_u32(get_random_u32(), max));
}

static struct intel_context *__migrate_engines(struct intel_gt *gt)
{
	struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
	struct intel_engine_cs *engine;
	unsigned int count, i;

	count = 0;
	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (engine_supports_migration(engine))
			engines[count++] = engine;
	}

	return intel_context_create(engines[random_index(count)]);
}

struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
{
	struct intel_context *ce;

	/*
	 * We randomly distribute contexts across the engines upon construction,
	 * as they all share the same pinned vm, and so in order to allow
	 * multiple blits to run in parallel, we must construct each blit
	 * to use a different range of the vm for its GTT. This has to be
	 * known at construction, so we cannot use the late greedy load
	 * balancing of the virtual-engine.
	 */
	ce = __migrate_engines(m->context->engine->gt);
	if (IS_ERR(ce))
		return ce;

	ce->ring = NULL;
	ce->ring_size = SZ_256K;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(m->context->vm);

	return ce;
}

static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
{
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

static int emit_no_arbitration(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Explicitly disable preemption for this request. */
	*cs++ = MI_ARB_ON_OFF;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int emit_pte(struct i915_request *rq,
		    struct sgt_dma *it,
		    enum i915_cache_level cache_level,
		    bool is_lmem,
		    u64 offset,
		    int length)
{
	bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
	const u64 encode = rq->context->vm->pte_encode(0, cache_level,
						       is_lmem ? PTE_LM : 0);
	struct intel_ring *ring = rq->ring;
	int pkt, dword_length;
	u32 total = 0;
	u32 page_size;
	u32 *hdr, *cs;

	GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);

	page_size = I915_GTT_PAGE_SIZE;
	dword_length = 0x400;

	/* Compute the page directory offset for the target address range */
	if (has_64K_pages) {
		GEM_BUG_ON(!IS_ALIGNED(offset, SZ_2M));

		offset /= SZ_2M;
		offset *= SZ_64K;
		offset += 3 * CHUNK_SZ;

		if (is_lmem) {
			page_size = I915_GTT_PAGE_SIZE_64K;
			dword_length = 0x40;
		}
	} else {
		offset >>= 12;
		offset *= sizeof(u64);
		offset += 2 * CHUNK_SZ;
	}

	offset += (u64)rq->engine->instance << 32;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Pack as many PTE updates as possible into a single MI command */
	pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
	pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);

	hdr = cs;
	*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	do {
		if (cs - hdr >= pkt) {
			int dword_rem;

			*hdr += cs - hdr - 2;
			*cs++ = MI_NOOP;

			ring->emit = (void *)cs - ring->vaddr;
			intel_ring_advance(rq, cs);
			intel_ring_update_space(ring);

			cs = intel_ring_begin(rq, 6);
			if (IS_ERR(cs))
				return PTR_ERR(cs);

			dword_rem = dword_length;
			if (has_64K_pages) {
				if (IS_ALIGNED(total, SZ_2M)) {
					offset = round_up(offset, SZ_64K);
				} else {
					dword_rem = SZ_2M - (total & (SZ_2M - 1));
					dword_rem /= page_size;
					dword_rem *= 2;
				}
			}

			pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
			pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);

			hdr = cs;
			*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
			*cs++ = lower_32_bits(offset);
			*cs++ = upper_32_bits(offset);
		}

		GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));

		*cs++ = lower_32_bits(encode | it->dma);
		*cs++ = upper_32_bits(encode | it->dma);

		offset += 8;
		total += page_size;

		it->dma += page_size;
		if (it->dma >= it->max) {
			it->sg = __sg_next(it->sg);
			if (!it->sg || sg_dma_len(it->sg) == 0)
				break;

			it->dma = sg_dma_address(it->sg);
			it->max = it->dma + sg_dma_len(it->sg);
		}
	} while (total < length);

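	/*
	 * Patch the final MI_STORE_DATA_IMM header with the number of dwords
	 * actually emitted and pad with a MI_NOOP, mirroring the fixup done
	 * whenever a packet fills up inside the loop above.
	 */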
	*hdr += cs - hdr - 2;
	*cs++ = MI_NOOP;

	ring->emit = (void *)cs - ring->vaddr;
	intel_ring_advance(rq, cs);
	intel_ring_update_space(ring);

	return total;
}

static bool wa_1209644611_applies(int ver, u32 size)
{
	u32 height = size >> PAGE_SHIFT;

	if (ver != 11)
		return false;

	return height % 4 == 3 && height <= 8;
}

/**
 * DOC: Flat-CCS - Memory compression for Local memory
 *
 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
 * stored in local memory for each surface, to support the 3D and media
 * compression formats.
 *
 * The memory required for the CCS of the entire local memory is 1/256 of the
 * local memory size. So before the kernel boots, the required memory is
 * reserved for the CCS data and a secure register is programmed with the CCS
 * base address.
 *
 * Flat CCS data needs to be cleared when an lmem object is allocated. CCS data
 * can be copied in and out of the CCS region through XY_CTRL_SURF_COPY_BLT;
 * the CPU can't access the CCS data directly.
 *
 * When lmem is exhausted, if the object's placements support smem, then we can
 * directly decompress the compressed lmem object into smem and start using it
 * from smem itself.
 *
 * But when we need to swap out the compressed lmem object into an smem region,
 * even though the object's placement doesn't support smem, we copy the lmem
 * content as-is into the smem region along with the CCS data (using
 * XY_CTRL_SURF_COPY_BLT). When the object is referenced again, the lmem
 * content is swapped back in along with restoration of the CCS data (using
 * XY_CTRL_SURF_COPY_BLT) at the corresponding location.
 */

static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
{
	*cmd++ = MI_FLUSH_DW | flags;
	*cmd++ = 0;
	*cmd++ = 0;

	return cmd;
}

static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
{
	u32 num_cmds, num_blks, total_size;

	if (!GET_CCS_BYTES(i915, size))
		return 0;

	/*
	 * XY_CTRL_SURF_COPY_BLT transfers CCS in 256 byte
	 * blocks. One XY_CTRL_SURF_COPY_BLT command can
	 * transfer up to 1024 blocks.
	 */
	num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
				NUM_CCS_BYTES_PER_BLOCK);
	num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
	total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;

	/*
	 * Add a flush before and after XY_CTRL_SURF_COPY_BLT.
	 */
	total_size += 2 * MI_FLUSH_DW_SIZE;

	return total_size;
}

static int emit_copy_ccs(struct i915_request *rq,
			 u32 dst_offset, u8 dst_access,
			 u32 src_offset, u8 src_access, int size)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	int mocs = rq->engine->gt->mocs.uc_index << 1;
	u32 num_ccs_blks, ccs_ring_size;
	u32 *cs;

	ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
	WARN_ON(!ccs_ring_size);

	cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
				    NUM_CCS_BYTES_PER_BLOCK);
	GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);

	/*
	 * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
	 * data in and out of the CCS region.
	 *
	 * We can copy at most 1024 blocks of 256 bytes using one
	 * XY_CTRL_SURF_COPY_BLT instruction.
	 *
	 * In case we need to copy more than 1024 blocks, we need to add
	 * another instruction to the same batch buffer.
	 *
	 * 1024 blocks of 256 bytes of CCS represent a total of 256KB of CCS.
	 *
	 * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
	 */
	*cs++ = XY_CTRL_SURF_COPY_BLT |
		src_access << SRC_ACCESS_TYPE_SHIFT |
		dst_access << DST_ACCESS_TYPE_SHIFT |
		((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
	*cs++ = src_offset;
	*cs++ = rq->engine->instance |
		FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
	*cs++ = dst_offset;
	*cs++ = rq->engine->instance |
		FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);

	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
	if (ccs_ring_size & 1)
		*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int emit_copy(struct i915_request *rq,
		     u32 dst_offset, u32 src_offset, int size)
{
	const int ver = GRAPHICS_VER(rq->engine->i915);
	u32 instance = rq->engine->instance;
	u32 *cs;

	cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
		*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
		*cs++ = BLT_DEPTH_32 | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = dst_offset;
		*cs++ = instance;
		*cs++ = 0;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
		*cs++ = instance;
	} else if (ver >= 8) {
		*cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = dst_offset;
		*cs++ = instance;
		*cs++ = 0;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
		*cs++ = instance;
	} else {
		GEM_BUG_ON(instance);
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
		*cs++ = dst_offset;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
	}

	intel_ring_advance(rq, cs);
	return 0;
}

static int scatter_list_length(struct scatterlist *sg)
{
	int len = 0;

	while (sg && sg_dma_len(sg)) {
		len += sg_dma_len(sg);
		sg = sg_next(sg);
	}

	return len;
}

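/*
 * A worked example of the Flat-CCS sizing used below, assuming the 1/256
 * main-memory-to-CCS ratio described in the Flat-CCS DOC comment above: one
 * CHUNK_SZ (8M) of lmem needs GET_CCS_BYTES() = 8M / 256 = 32K of CCS data,
 * i.e. 128 blocks of 256 bytes, comfortably within the 1024-block limit of a
 * single XY_CTRL_SURF_COPY_BLT.
 */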
static void
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
		   int *src_sz, int *ccs_sz, u32 bytes_to_cpy,
		   u32 ccs_bytes_to_cpy)
{
	if (ccs_bytes_to_cpy) {
		/*
		 * We can only copy the ccs data corresponding to
		 * the CHUNK_SZ of lmem, which is
		 * GET_CCS_BYTES(i915, CHUNK_SZ).
		 */
		*ccs_sz = min_t(int, ccs_bytes_to_cpy, GET_CCS_BYTES(i915, CHUNK_SZ));

		if (!src_is_lmem)
			/*
			 * When CHUNK_SZ is passed, all the pages up to
			 * CHUNK_SZ will be taken for the blt. On flat-CCS
			 * platforms the smem object will have more pages
			 * than required for main memory, hence limit it to
			 * the required size for main memory.
			 */
			*src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
	} else { /* ccs handling is not required */
		*src_sz = CHUNK_SZ;
	}
}

static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
{
	u32 len;

	do {
		GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
		len = it->max - it->dma;
		if (len > bytes_to_cpy) {
			it->dma += bytes_to_cpy;
			break;
		}

		bytes_to_cpy -= len;

		it->sg = __sg_next(it->sg);
		it->dma = sg_dma_address(it->sg);
		it->max = it->dma + sg_dma_len(it->sg);
	} while (bytes_to_cpy);
}

int
intel_context_migrate_copy(struct intel_context *ce,
			   const struct i915_deps *deps,
			   struct scatterlist *src,
			   enum i915_cache_level src_cache_level,
			   bool src_is_lmem,
			   struct scatterlist *dst,
			   enum i915_cache_level dst_cache_level,
			   bool dst_is_lmem,
			   struct i915_request **out)
{
	struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
	struct drm_i915_private *i915 = ce->engine->i915;
	u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
	enum i915_cache_level ccs_cache_level;
	int src_sz, dst_sz, ccs_sz;
	u32 src_offset, dst_offset;
	u8 src_access, dst_access;
	struct i915_request *rq;
	bool ccs_is_src;
	int err;

	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
	GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
	*out = NULL;

	GEM_BUG_ON(ce->ring->size < SZ_64K);

	src_sz = scatter_list_length(src);
	bytes_to_cpy = src_sz;

	if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
		src_access = !src_is_lmem && dst_is_lmem;
		dst_access = !src_access;

		dst_sz = scatter_list_length(dst);
		if (src_is_lmem) {
			it_ccs = it_dst;
			ccs_cache_level = dst_cache_level;
			ccs_is_src = false;
		} else if (dst_is_lmem) {
			bytes_to_cpy = dst_sz;
			it_ccs = it_src;
			ccs_cache_level = src_cache_level;
			ccs_is_src = true;
		}

		/*
		 * When there is an eviction for which the ccs data is needed,
		 * smem will have the extra pages to hold that ccs data.
		 *
		 * TO-DO: Want to move the size mismatch check to a WARN_ON,
		 * but still we have some requests of smem->lmem with same size.
		 * Need to fix it.
		 */
		ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
		if (ccs_bytes_to_cpy)
			get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
	}

	src_offset = 0;
	dst_offset = CHUNK_SZ;
	if (HAS_64K_PAGES(ce->engine->i915)) {
		src_offset = 0;
		dst_offset = 0;
		if (src_is_lmem)
			src_offset = CHUNK_SZ;
		if (dst_is_lmem)
			dst_offset = 2 * CHUNK_SZ;
	}

	do {
		int len;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		if (deps) {
			err = i915_request_await_deps(rq, deps);
			if (err)
				goto out_rq;

			if (rq->engine->emit_init_breadcrumb) {
				err = rq->engine->emit_init_breadcrumb(rq);
				if (err)
					goto out_rq;
			}

			deps = NULL;
		}

		/* The PTE updates + copy must not be interrupted. */
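		/*
		 * emit_no_arbitration() opens a non-preemptible window so the
		 * PTE writes and the blit below execute back to back;
		 * arbitration is re-enabled between requests (see out_rq
		 * below).
		 */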
		err = emit_no_arbitration(rq);
		if (err)
			goto out_rq;

		calculate_chunk_sz(i915, src_is_lmem, &src_sz, &ccs_sz,
				   bytes_to_cpy, ccs_bytes_to_cpy);

		len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
			       src_offset, src_sz);
		if (!len) {
			err = -EINVAL;
			goto out_rq;
		}
		if (len < 0) {
			err = len;
			goto out_rq;
		}

		err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
			       dst_offset, len);
		if (err < 0)
			goto out_rq;
		if (err < len) {
			err = -EINVAL;
			goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			goto out_rq;

		err = emit_copy(rq, dst_offset, src_offset, len);
		if (err)
			goto out_rq;

		bytes_to_cpy -= len;

		if (ccs_bytes_to_cpy) {
			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
				       ccs_is_src ? src_offset : dst_offset,
				       ccs_sz);
			if (err < 0)
				goto out_rq;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			/*
			 * Using max of src_sz and dst_sz, as we need to
			 * pass the lmem size corresponding to the ccs
			 * blocks we need to handle.
			 */
			ccs_sz = max_t(int, ccs_is_src ? ccs_sz : src_sz,
				       ccs_is_src ? dst_sz : ccs_sz);

			err = emit_copy_ccs(rq, dst_offset, dst_access,
					    src_offset, src_access, ccs_sz);
			if (err)
				goto out_rq;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			/* Converting back to ccs bytes */
			ccs_sz = GET_CCS_BYTES(rq->engine->i915, ccs_sz);
			ccs_bytes_to_cpy -= ccs_sz;
		}

		/* Arbitration is re-enabled between requests. */
out_rq:
		if (*out)
			i915_request_put(*out);
		*out = i915_request_get(rq);
		i915_request_add(rq);

		if (err)
			break;

		if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
			if (src_is_lmem)
				WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
			else
				WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
			break;
		}

		if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
			    !it_dst.sg || !sg_dma_len(it_dst.sg) ||
			    (ccs_bytes_to_cpy && (!it_ccs.sg ||
						  !sg_dma_len(it_ccs.sg))))) {
			err = -EINVAL;
			break;
		}

		cond_resched();
	} while (1);

out_ce:
	return err;
}

static int emit_clear(struct i915_request *rq, u32 offset, int size,
		      u32 value, bool is_lmem)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	int mocs = rq->engine->gt->mocs.uc_index << 1;
	const int ver = GRAPHICS_VER(i915);
	int ring_sz;
	u32 *cs;

	GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);

	if (HAS_FLAT_CCS(i915) && ver >= 12)
		ring_sz = XY_FAST_COLOR_BLT_DW;
	else if (ver >= 8)
		ring_sz = 8;
	else
		ring_sz = 6;

	cs = intel_ring_begin(rq, ring_sz);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (HAS_FLAT_CCS(i915) && ver >= 12) {
		*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
			(XY_FAST_COLOR_BLT_DW - 2);
		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
			(PAGE_SIZE - 1);
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = rq->engine->instance;
		*cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
		/* BG7 */
		*cs++ = value;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		/* BG11 */
		*cs++ = 0;
		*cs++ = 0;
		/* BG13 */
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	} else if (ver >= 8) {
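		/*
		 * gen8+ uses a 64-bit blit address: the low dword is the
		 * offset within the migration window and the high dword is
		 * the engine instance, matching the per-engine window base
		 * set up in migrate_vm()/emit_pte().
		 */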
		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = rq->engine->instance;
		*cs++ = value;
		*cs++ = MI_NOOP;
	} else {
		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = value;
	}

	intel_ring_advance(rq, cs);
	return 0;
}

int
intel_context_migrate_clear(struct intel_context *ce,
			    const struct i915_deps *deps,
			    struct scatterlist *sg,
			    enum i915_cache_level cache_level,
			    bool is_lmem,
			    u32 value,
			    struct i915_request **out)
{
	struct drm_i915_private *i915 = ce->engine->i915;
	struct sgt_dma it = sg_sgt(sg);
	struct i915_request *rq;
	u32 offset;
	int err;

	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
	*out = NULL;

	GEM_BUG_ON(ce->ring->size < SZ_64K);

	offset = 0;
	if (HAS_64K_PAGES(i915) && is_lmem)
		offset = CHUNK_SZ;

	do {
		int len;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		if (deps) {
			err = i915_request_await_deps(rq, deps);
			if (err)
				goto out_rq;

			if (rq->engine->emit_init_breadcrumb) {
				err = rq->engine->emit_init_breadcrumb(rq);
				if (err)
					goto out_rq;
			}

			deps = NULL;
		}

		/* The PTE updates + clear must not be interrupted. */
		err = emit_no_arbitration(rq);
		if (err)
			goto out_rq;

		len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
		if (len <= 0) {
			err = len;
			goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			goto out_rq;

		err = emit_clear(rq, offset, len, value, is_lmem);
		if (err)
			goto out_rq;

		if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
			/*
			 * Copy the content of memory into the corresponding
			 * ccs surface.
			 */
			err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
					    DIRECT_ACCESS, len);
			if (err)
				goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);

		/* Arbitration is re-enabled between requests. */
out_rq:
		if (*out)
			i915_request_put(*out);
		*out = i915_request_get(rq);
		i915_request_add(rq);
		if (err || !it.sg || !sg_dma_len(it.sg))
			break;

		cond_resched();
	} while (1);

out_ce:
	return err;
}

int intel_migrate_copy(struct intel_migrate *m,
		       struct i915_gem_ww_ctx *ww,
		       const struct i915_deps *deps,
		       struct scatterlist *src,
		       enum i915_cache_level src_cache_level,
		       bool src_is_lmem,
		       struct scatterlist *dst,
		       enum i915_cache_level dst_cache_level,
		       bool dst_is_lmem,
		       struct i915_request **out)
{
	struct intel_context *ce;
	int err;

	*out = NULL;
	if (!m->context)
		return -ENODEV;

	ce = intel_migrate_create_context(m);
	if (IS_ERR(ce))
		ce = intel_context_get(m->context);
	GEM_BUG_ON(IS_ERR(ce));

	err = intel_context_pin_ww(ce, ww);
	if (err)
		goto out;

	err = intel_context_migrate_copy(ce, deps,
					 src, src_cache_level, src_is_lmem,
					 dst, dst_cache_level, dst_is_lmem,
					 out);

	intel_context_unpin(ce);
out:
	intel_context_put(ce);
	return err;
}

int
intel_migrate_clear(struct intel_migrate *m,
		    struct i915_gem_ww_ctx *ww,
		    const struct i915_deps *deps,
		    struct scatterlist *sg,
		    enum i915_cache_level cache_level,
		    bool is_lmem,
		    u32 value,
		    struct i915_request **out)
{
	struct intel_context *ce;
	int err;

	*out = NULL;
	if (!m->context)
		return -ENODEV;

	ce = intel_migrate_create_context(m);
	if (IS_ERR(ce))
		ce = intel_context_get(m->context);
	GEM_BUG_ON(IS_ERR(ce));

	err = intel_context_pin_ww(ce, ww);
	if (err)
		goto out;

	err = intel_context_migrate_clear(ce, deps, sg, cache_level,
					  is_lmem, value, out);

	intel_context_unpin(ce);
out:
	intel_context_put(ce);
	return err;
}

void intel_migrate_fini(struct intel_migrate *m)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&m->context);
	if (!ce)
		return;

	intel_engine_destroy_pinned_context(ce);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_migrate.c"
#endif