// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gem/i915_gem_lmem.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"

#define OW_SIZE 16			/* in bytes */
#define F_SUBTILE_SIZE 64		/* in bytes */
#define F_TILE_WIDTH 128		/* in bytes */
#define F_TILE_HEIGHT 32		/* in pixels */
#define F_SUBTILE_WIDTH OW_SIZE		/* in bytes */
#define F_SUBTILE_HEIGHT 4		/* in pixels */

static int linear_x_y_to_ftiled_pos(int x, int y, u32 stride, int bpp)
{
	int tile_base;
	int tile_x, tile_y;
	int swizzle, subtile;
	int pixel_size = bpp / 8;
	int pos;

	/*
	 * Subtile remapping for F tile. Note that map[a] == b implies
	 * map[b] == a, so we can use the same table to tile and untile.
	 */
	static const u8 f_subtile_map[] = {
		 0,  1,  2,  3,  8,  9, 10, 11,
		 4,  5,  6,  7, 12, 13, 14, 15,
		16, 17, 18, 19, 24, 25, 26, 27,
		20, 21, 22, 23, 28, 29, 30, 31,
		32, 33, 34, 35, 40, 41, 42, 43,
		36, 37, 38, 39, 44, 45, 46, 47,
		48, 49, 50, 51, 56, 57, 58, 59,
		52, 53, 54, 55, 60, 61, 62, 63
	};

	x *= pixel_size;
	/*
	 * Where does the 4k tile start (in bytes)? This is the same for Y and
	 * F so we can use the Y-tile algorithm to get to that point.
	 */
	tile_base =
		y / F_TILE_HEIGHT * stride * F_TILE_HEIGHT +
		x / F_TILE_WIDTH * 4096;

	/* Find pixel within tile */
	tile_x = x % F_TILE_WIDTH;
	tile_y = y % F_TILE_HEIGHT;

	/* And figure out the subtile within the 4k tile */
	subtile = tile_y / F_SUBTILE_HEIGHT * 8 + tile_x / F_SUBTILE_WIDTH;

	/* Swizzle the subtile number according to the bspec diagram */
	swizzle = f_subtile_map[subtile];

	/* Calculate new position */
	pos = tile_base +
	      swizzle * F_SUBTILE_SIZE +
	      tile_y % F_SUBTILE_HEIGHT * OW_SIZE +
	      tile_x % F_SUBTILE_WIDTH;

	GEM_BUG_ON(!IS_ALIGNED(pos, pixel_size));

	return pos / pixel_size * 4;
}

enum client_tiling {
	CLIENT_TILING_LINEAR,
	CLIENT_TILING_X,
	CLIENT_TILING_Y,
	CLIENT_TILING_4,
	CLIENT_NUM_TILING_TYPES
};

#define WIDTH 512
#define HEIGHT 32

struct blit_buffer {
	struct i915_vma *vma;
	u32 start_val;
	enum client_tiling tiling;
};

struct tiled_blits {
	struct intel_context *ce;
	struct blit_buffer buffers[3];
	struct blit_buffer scratch;
	struct i915_vma *batch;
	u64 hole;
	u64 align;
	u32 width;
	u32 height;
};

static bool supports_x_tiling(const struct drm_i915_private *i915)
{
	int gen = GRAPHICS_VER(i915);

	if (gen < 12)
		return true;

	if (!HAS_LMEM(i915) || IS_DG1(i915))
		return false;

	return true;
}

static bool fast_blit_ok(const struct blit_buffer *buf)
{
	int gen = GRAPHICS_VER(buf->vma->vm->i915);

	if (gen < 9)
		return false;

	if (gen < 12)
		return true;

	/* Filter out platforms where the fast blit cannot handle X-tiling */
	if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
		return false;

	return true;
}
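
/*
 * Worked example for linear_x_y_to_ftiled_pos(), for illustration only
 * (the numbers below are derived by hand from the code above, not taken
 * from the bspec): with bpp = 32 and a 2048-byte stride, pixel (x = 5,
 * y = 7) lands in the first 4k tile (tile_base = 0) at tile_x = 20,
 * tile_y = 7, giving subtile 9, which f_subtile_map[] swizzles to 5;
 * the final offset is 5 * 64 + 3 * 16 + 4 = 372 bytes into the tile.
 */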
static int prepare_blit(const struct tiled_blits *t,
			struct blit_buffer *dst,
			struct blit_buffer *src,
			struct drm_i915_gem_object *batch)
{
	const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
	bool use_64b_reloc = ver >= 8;
	u32 src_pitch, dst_pitch;
	u32 cmd, *cs;

	cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (fast_blit_ok(dst) && fast_blit_ok(src)) {
		struct intel_gt *gt = t->ce->engine->gt;
		u32 src_tiles = 0, dst_tiles = 0;
		u32 src_4t = 0, dst_4t = 0;

		/*
		 * BLIT_CCTL must be programmed before XY_FAST_COPY_BLT can
		 * be used, so set it up here in case it has not been
		 * programmed previously.
		 */
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base));
		*cs++ = (BLIT_CCTL_SRC_MOCS(gt->mocs.uc_index) |
			 BLIT_CCTL_DST_MOCS(gt->mocs.uc_index));

		src_pitch = t->width; /* in dwords */
		if (src->tiling == CLIENT_TILING_4) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
			src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
		} else if (src->tiling == CLIENT_TILING_Y) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
		} else if (src->tiling == CLIENT_TILING_X) {
			src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
		} else {
			src_pitch *= 4; /* in bytes */
		}

		dst_pitch = t->width; /* in dwords */
		if (dst->tiling == CLIENT_TILING_4) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
			dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
		} else if (dst->tiling == CLIENT_TILING_Y) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
		} else if (dst->tiling == CLIENT_TILING_X) {
			dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
		} else {
			dst_pitch *= 4; /* in bytes */
		}

		*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2) |
			src_tiles | dst_tiles;
		*cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
		*cs++ = 0;
		*cs++ = t->height << 16 | t->width;
		*cs++ = lower_32_bits(dst->vma->node.start);
		*cs++ = upper_32_bits(dst->vma->node.start);
		*cs++ = 0;
		*cs++ = src_pitch;
		*cs++ = lower_32_bits(src->vma->node.start);
		*cs++ = upper_32_bits(src->vma->node.start);
	} else {
		if (ver >= 6) {
			/*
			 * BCS_SWCTRL is a masked register: the upper 16 bits
			 * select which of the lower bits are written.
			 */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
			cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
			if (src->tiling == CLIENT_TILING_Y)
				cmd |= BCS_SRC_Y;
			if (dst->tiling == CLIENT_TILING_Y)
				cmd |= BCS_DST_Y;
			*cs++ = cmd;

			cmd = MI_FLUSH_DW;
			if (ver >= 8)
				cmd++;
			*cs++ = cmd;
			*cs++ = 0;
			*cs++ = 0;
			*cs++ = 0;
		}

		cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
		if (ver >= 8)
			cmd += 2;

		src_pitch = t->width * 4;
		if (src->tiling) {
			cmd |= XY_SRC_COPY_BLT_SRC_TILED;
			src_pitch /= 4;
		}

		dst_pitch = t->width * 4;
		if (dst->tiling) {
			cmd |= XY_SRC_COPY_BLT_DST_TILED;
			dst_pitch /= 4;
		}

		*cs++ = cmd;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
		*cs++ = 0;
		*cs++ = t->height << 16 | t->width;
		*cs++ = lower_32_bits(dst->vma->node.start);
		if (use_64b_reloc)
			*cs++ = upper_32_bits(dst->vma->node.start);
		*cs++ = 0;
		*cs++ = src_pitch;
		*cs++ = lower_32_bits(src->vma->node.start);
		if (use_64b_reloc)
			*cs++ = upper_32_bits(src->vma->node.start);
	}

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch);
	i915_gem_object_unpin_map(batch);

	return 0;
}
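
/*
 * Note on pitch units in prepare_blit() above, as the code encodes them:
 * XY_FAST_COPY_BLT takes the pitch in dwords for tiled surfaces and in
 * bytes for linear ones, while the legacy XY_SRC_COPY_BLT takes bytes
 * for linear and dwords (bytes / 4) for tiled, which is why the two
 * paths scale t->width differently.
 */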
static void tiled_blits_destroy_buffers(struct tiled_blits *t)
{
	int i;

	/* May be called on a partially constructed set of buffers */
	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		if (t->buffers[i].vma)
			i915_vma_put(t->buffers[i].vma);
	}

	i915_vma_put(t->scratch.vma);
	i915_vma_put(t->batch);
}

static struct i915_vma *
__create_vma(struct tiled_blits *t, size_t size, bool lmem)
{
	struct drm_i915_private *i915 = t->ce->vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	if (lmem)
		obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		obj = i915_gem_object_create_shmem(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, t->ce->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
{
	return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
}

static int tiled_blits_create_buffers(struct tiled_blits *t,
				      int width, int height,
				      struct rnd_state *prng)
{
	struct drm_i915_private *i915 = t->ce->engine->i915;
	int i;

	t->width = width;
	t->height = height;

	t->batch = __create_vma(t, PAGE_SIZE, false);
	if (IS_ERR(t->batch))
		return PTR_ERR(t->batch);

	t->scratch.vma = create_vma(t, false);
	if (IS_ERR(t->scratch.vma)) {
		i915_vma_put(t->batch);
		return PTR_ERR(t->scratch.vma);
	}

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		struct i915_vma *vma;

		/* Place every other buffer in local memory, if available */
		vma = create_vma(t, HAS_LMEM(i915) && i % 2);
		if (IS_ERR(vma)) {
			tiled_blits_destroy_buffers(t);
			return PTR_ERR(vma);
		}

		t->buffers[i].vma = vma;
		t->buffers[i].tiling =
			i915_prandom_u32_max_state(CLIENT_NUM_TILING_TYPES, prng);

		/* Platforms support either TileY or Tile4, not both */
		if (HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_Y)
			t->buffers[i].tiling = CLIENT_TILING_4;
		else if (!HAS_4TILE(i915) && t->buffers[i].tiling == CLIENT_TILING_4)
			t->buffers[i].tiling = CLIENT_TILING_Y;
	}

	return 0;
}

static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
{
	int i;

	t->scratch.start_val = val;
	for (i = 0; i < t->width * t->height; i++)
		vaddr[i] = val++;

	i915_gem_object_flush_map(t->scratch.vma->obj);
}

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}
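
/*
 * swizzle_bit() above extracts the chosen address bit and shifts it down
 * to bit position 6, matching how the hardware XORs high address bits
 * into bit 6 of a tiled offset; e.g. swizzle_bit(9, v) evaluates to
 * (v & BIT(9)) >> 3, i.e. the value of bit 9 placed at bit 6.
 */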
static u64 tiled_offset(const struct intel_gt *gt,
			u64 v,
			unsigned int stride,
			enum client_tiling tiling,
			int x_pos, int y_pos)
{
	unsigned int swizzle;
	u64 x, y;

	if (tiling == CLIENT_TILING_LINEAR)
		return v;

	y = div64_u64_rem(v, stride, &x);

	if (tiling == CLIENT_TILING_4) {
		v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);

		/* no swizzling for f-tiling */
		swizzle = I915_BIT_6_SWIZZLE_NONE;
	} else if (tiling == CLIENT_TILING_X) {
		v = div64_u64_rem(y, 8, &y) * stride * 8;
		v += y * 512;
		v += div64_u64_rem(x, 512, &x) << 12;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v = div64_u64_rem(y, 32, &y) * stride * 32;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_y;
	}

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

static const char *repr_tiling(enum client_tiling tiling)
{
	switch (tiling) {
	case CLIENT_TILING_LINEAR: return "linear";
	case CLIENT_TILING_X: return "X";
	case CLIENT_TILING_Y: return "Y";
	case CLIENT_TILING_4: return "F";
	default: return "unknown";
	}
}

static int verify_buffer(const struct tiled_blits *t,
			 struct blit_buffer *buf,
			 struct rnd_state *prng)
{
	const u32 *vaddr;
	int ret = 0;
	int x, y, p;

	x = i915_prandom_u32_max_state(t->width, prng);
	y = i915_prandom_u32_max_state(t->height, prng);
	p = y * t->width + x;

	vaddr = i915_gem_object_pin_map_unlocked(buf->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (vaddr[0] != buf->start_val) {
		ret = -EINVAL;
	} else {
		u64 v = tiled_offset(buf->vma->vm->gt,
				     p * 4, t->width * 4,
				     buf->tiling, x, y);

		if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
			ret = -EINVAL;
	}
	if (ret) {
		pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
		       repr_tiling(buf->tiling),
		       x, y, buf->start_val);
		igt_hexdump(vaddr, 4096);
	}

	i915_gem_object_unpin_map(buf->vma->obj);
	return ret;
}

static int pin_buffer(struct i915_vma *vma, u64 addr)
{
	int err;

	if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
		err = i915_vma_unbind_unlocked(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
	if (err)
		return err;

	return 0;
}

static int
tiled_blit(struct tiled_blits *t,
	   struct blit_buffer *dst, u64 dst_addr,
	   struct blit_buffer *src, u64 src_addr)
{
	struct i915_request *rq;
	int err;

	err = pin_buffer(src->vma, src_addr);
	if (err) {
		pr_err("Cannot pin src @ %llx\n", src_addr);
		return err;
	}

	err = pin_buffer(dst->vma, dst_addr);
	if (err) {
		pr_err("Cannot pin dst @ %llx\n", dst_addr);
		goto err_src;
	}

	err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		pr_err("cannot pin batch\n");
		goto err_dst;
	}

	err = prepare_blit(t, dst, src, t->batch->obj);
	if (err)
		goto err_bb;

	rq = intel_context_create_request(t->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_bb;
	}

	err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
	if (!err)
		err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
	if (!err)
		err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
	if (!err)
		err = rq->engine->emit_bb_start(rq,
						t->batch->node.start,
						t->batch->node.size,
						0);
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;
	i915_request_put(rq);

	dst->start_val = src->start_val;
err_bb:
	i915_vma_unpin(t->batch);
err_dst:
	i915_vma_unpin(dst->vma);
err_src:
	i915_vma_unpin(src->vma);
	return err;
}
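
/*
 * tiled_blit() above is fully synchronous: it pins src and dst at the
 * caller-chosen GTT offsets, re-emits the batch for this (src, dst)
 * tiling pair, submits a single request and waits up to HZ/2 for it,
 * returning -ETIME if the copy has not completed by then.
 */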
static struct tiled_blits *
tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct drm_mm_node hole;
	struct tiled_blits *t;
	u64 hole_size;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->ce = intel_context_create(engine);
	if (IS_ERR(t->ce)) {
		err = PTR_ERR(t->ce);
		goto err_free;
	}

	t->align = i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_LOCAL);
	t->align = max(t->align,
		       i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_SYSTEM));

	hole_size = 2 * round_up(WIDTH * HEIGHT * 4, t->align);
	hole_size *= 2; /* room to maneuver */
	hole_size += 2 * t->align; /* padding on either side */

	mutex_lock(&t->ce->vm->mutex);
	memset(&hole, 0, sizeof(hole));
	err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
					  hole_size, t->align,
					  I915_COLOR_UNEVICTABLE,
					  0, U64_MAX,
					  DRM_MM_INSERT_BEST);
	if (!err)
		drm_mm_remove_node(&hole);
	mutex_unlock(&t->ce->vm->mutex);
	if (err) {
		err = -ENODEV;
		goto err_put;
	}

	t->hole = hole.start + t->align;
	pr_info("Using hole at %llx\n", t->hole);

	err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
	if (err)
		goto err_put;

	return t;

err_put:
	intel_context_put(t->ce);
err_free:
	kfree(t);
	return ERR_PTR(err);
}

static void tiled_blits_destroy(struct tiled_blits *t)
{
	tiled_blits_destroy_buffers(t);

	intel_context_put(t->ce);
	kfree(t);
}

static int tiled_blits_prepare(struct tiled_blits *t,
			       struct rnd_state *prng)
{
	u64 offset = round_up(t->width * t->height * 4, t->align);
	u32 *map;
	int err;
	int i;

	map = i915_gem_object_pin_map_unlocked(t->scratch.vma->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Use scratch to fill objects */
	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		fill_scratch(t, map, prandom_u32_state(prng));
		GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));

		err = tiled_blit(t,
				 &t->buffers[i], t->hole + offset,
				 &t->scratch, t->hole);
		if (err == 0)
			err = verify_buffer(t, &t->buffers[i], prng);
		if (err) {
			pr_err("Failed to create buffer %d\n", i);
			break;
		}
	}

	i915_gem_object_unpin_map(t->scratch.vma->obj);
	return err;
}

static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
{
	u64 offset = round_up(t->width * t->height * 4, 2 * t->align);
	int err;

	/* We want to check position invariant tiling across GTT eviction */

	err = tiled_blit(t,
			 &t->buffers[1], t->hole + offset / 2,
			 &t->buffers[0], t->hole + 2 * offset);
	if (err)
		return err;

	/* Simulating GTT eviction of the same buffer / layout */
	t->buffers[2].tiling = t->buffers[0].tiling;

	/* Reposition so that we overlap the old addresses, and slightly off */
	err = tiled_blit(t,
			 &t->buffers[2], t->hole + t->align,
			 &t->buffers[1], t->hole + 3 * offset / 2);
	if (err)
		return err;

	return verify_buffer(t, &t->buffers[2], prng);
}

static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
				    struct rnd_state *prng)
{
	struct tiled_blits *t;
	int err;

	t = tiled_blits_create(engine, prng);
	if (IS_ERR(t))
		return PTR_ERR(t);

	err = tiled_blits_prepare(t, prng);
	if (err)
		goto out;

	err = tiled_blits_bounce(t, prng);

out:
	tiled_blits_destroy(t);
	return err;
}
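
/*
 * Bit-17 swizzling mixes a bit of the physical page address into the
 * swizzle, so the layout cannot be predicted from a GTT offset alone;
 * such machines (and those quirked to pin swizzled pages) are filtered
 * out below.
 */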
static bool has_bit17_swizzle(int sw)
{
	return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
		sw == I915_BIT_6_SWIZZLE_9_17);
}

static bool bad_swizzling(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
		return true;

	if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
	    has_bit17_swizzle(ggtt->bit_6_swizzle_y))
		return true;

	return false;
}

static int igt_client_tiled_blits(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	int inst = 0;

	/* Test requires explicit BLT tiling controls */
	if (GRAPHICS_VER(i915) < 4)
		return 0;

	if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
		return 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_tiled_blits(engine, &prng);
		if (err == -ENODEV)
			err = 0;
		if (err)
			return err;
	} while (1);
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_tiled_blits),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}
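
/*
 * Usage note: these live selftests are compiled in under
 * CONFIG_DRM_I915_SELFTEST and are, to the best of our knowledge,
 * exercised by loading i915 with the i915.live_selftests module
 * parameter (e.g. via the IGT selftest wrappers).
 */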