/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "mock_context.h"

static bool assert_vma(struct i915_vma *vma,
		       struct drm_i915_gem_object *obj,
		       struct i915_gem_context *ctx)
{
	bool ok = true;

	if (vma->vm != &ctx->ppgtt->base) {
		pr_err("VMA created with wrong VM\n");
		ok = false;
	}

	if (vma->size != obj->base.size) {
		pr_err("VMA created with wrong size, found %llu, expected %zu\n",
		       vma->size, obj->base.size);
		ok = false;
	}

	if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
		pr_err("VMA created with wrong type [%d]\n",
		       vma->ggtt_view.type);
		ok = false;
	}

	return ok;
}

static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
		     struct i915_address_space *vm,
		     struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	bool ok = true;

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	/* Manual checks, will be reinforced by i915_vma_compare! */
	if (vma->vm != vm) {
		pr_err("VMA's vm [%p] does not match request [%p]\n",
		       vma->vm, vm);
		ok = false;
	}

	if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
		pr_err("VMA ggtt status [%d] does not match parent [%d]\n",
		       i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
		ok = false;
	}

	if (i915_vma_compare(vma, vm, view)) {
		pr_err("i915_vma_compare failed with create parameters!\n");
		return ERR_PTR(-EINVAL);
	}

	if (i915_vma_compare(vma, vma->vm,
			     i915_vma_is_ggtt(vma) ?
			     &vma->ggtt_view : NULL)) {
		pr_err("i915_vma_compare failed with itself\n");
		return ERR_PTR(-EINVAL);
	}

	if (!ok) {
		pr_err("i915_vma_compare failed to detect the difference!\n");
		return ERR_PTR(-EINVAL);
	}

	return vma;
}

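/* Lookup (or create) a vma for each object in every context's ppGTT,
 * verifying the result; pin it on the first pass and unpin it on the second.
 */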
static int create_vmas(struct drm_i915_private *i915,
		       struct list_head *objects,
		       struct list_head *contexts)
{
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	int pinned;

	list_for_each_entry(obj, objects, st_link) {
		for (pinned = 0; pinned <= 1; pinned++) {
			list_for_each_entry(ctx, contexts, link) {
				struct i915_address_space *vm =
					&ctx->ppgtt->base;
				struct i915_vma *vma;
				int err;

				vma = checked_vma_instance(obj, vm, NULL);
				if (IS_ERR(vma))
					return PTR_ERR(vma);

				if (!assert_vma(vma, obj, ctx)) {
					pr_err("VMA lookup/create failed\n");
					return -EINVAL;
				}

				if (!pinned) {
					err = i915_vma_pin(vma, 0, 0, PIN_USER);
					if (err) {
						pr_err("Failed to pin VMA\n");
						return err;
					}
				} else {
					i915_vma_unpin(vma);
				}
			}
		}
	}

	return 0;
}

static int igt_vma_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct i915_gem_context *ctx, *cn;
	unsigned long num_obj, num_ctx;
	unsigned long no, nc;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(contexts);
	LIST_HEAD(objects);
	int err;

	/* Exercise creating many vma amongst many objects, checking the
	 * vma creation and lookup routines.
	 */

	no = 0;
	for_each_prime_number(num_obj, ULONG_MAX - 1) {
		for (; no < num_obj; no++) {
			obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out;
			}

			list_add(&obj->st_link, &objects);
		}

		nc = 0;
		for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
			for (; nc < num_ctx; nc++) {
				ctx = mock_context(i915, "mock");
				if (!ctx) {
					err = -ENOMEM;
					goto out;
				}

				list_move(&ctx->link, &contexts);
			}

			err = create_vmas(i915, &objects, &contexts);
			if (err)
				goto out;

			if (igt_timeout(end_time,
					"%s timed out: after %lu objects in %lu contexts\n",
					__func__, no, nc))
				goto end;
		}

		list_for_each_entry_safe(ctx, cn, &contexts, link) {
			list_del_init(&ctx->link);
			mock_context_close(ctx);
		}
	}

end:
	/* Final pass to lookup all created contexts */
	err = create_vmas(i915, &objects, &contexts);
out:
	list_for_each_entry_safe(ctx, cn, &contexts, link) {
		list_del_init(&ctx->link);
		mock_context_close(ctx);
	}

	list_for_each_entry_safe(obj, on, &objects, st_link)
		i915_gem_object_put(obj);
	return err;
}

struct pin_mode {
	u64 size;
	u64 flags;
	bool (*assert)(const struct i915_vma *,
		       const struct pin_mode *mode,
		       int result);
	const char *string;
};

static bool assert_pin_valid(const struct i915_vma *vma,
			     const struct pin_mode *mode,
			     int result)
{
	if (result)
		return false;

	if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
		return false;

	return true;
}

__maybe_unused
static bool assert_pin_enospc(const struct i915_vma *vma,
			      const struct pin_mode *mode,
			      int result)
{
	return result == -ENOSPC;
}

__maybe_unused
static bool assert_pin_einval(const struct i915_vma *vma,
			      const struct pin_mode *mode,
			      int result)
{
	return result == -EINVAL;
}

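/* Table-driven pinning checks: each pin_mode pairs a requested size and set
 * of PIN_* flags with the assertion expected for that i915_vma_pin() call.
 */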
static int igt_vma_pin1(void *arg)
{
	struct drm_i915_private *i915 = arg;
	const struct pin_mode modes[] = {
#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
		VALID(0, PIN_GLOBAL),
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE),

		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),

		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
		INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),

		VALID(4096, PIN_GLOBAL),
		VALID(8192, PIN_GLOBAL),
		VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
		VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
		NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
		VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
		VALID(i915->ggtt.base.total, PIN_GLOBAL),
		NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
		NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
		INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),

		VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),

#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
		/* Misusing BIAS is a programming error (it is not controllable
		 * from userspace) so when debugging is enabled, it explodes.
		 * However, the tests are still quite interesting for checking
		 * variable start, end and size.
		 */
		NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
		NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
#endif
		{ },
#undef NOSPACE
#undef INVALID
#undef __INVALID
#undef VALID
	}, *m;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err = -EINVAL;

	/* Exercise all the weird and wonderful i915_vma_pin requests,
	 * focusing on error handling of boundary conditions.
	 */

	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma))
		goto out;

	for (m = modes; m->assert; m++) {
		err = i915_vma_pin(vma, m->size, 0, m->flags);
		if (!m->assert(vma, m, err)) {
			pr_err("%s to pin single page into GGTT with mode[%d:%s]: size=%llx flags=%llx, err=%d\n",
			       m->assert == assert_pin_valid ? "Failed" : "Unexpectedly succeeded",
			       (int)(m - modes), m->string, m->size, m->flags,
			       err);
			if (!err)
				i915_vma_unpin(vma);
			err = -EINVAL;
			goto out;
		}

		if (!err) {
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
				goto out;
			}
		}
	}

	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

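/* Page index within the source object backing position (x, y) of plane n in
 * the rotated view; the y axis is inverted relative to the source plane.
 */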
static unsigned long rotated_index(const struct intel_rotation_info *r,
				   unsigned int n,
				   unsigned int x,
				   unsigned int y)
{
	return (r->plane[n].stride * (r->plane[n].height - y - 1) +
		r->plane[n].offset + x);
}

static struct scatterlist *
assert_rotated(struct drm_i915_gem_object *obj,
	       const struct intel_rotation_info *r, unsigned int n,
	       struct scatterlist *sg)
{
	unsigned int x, y;

	for (x = 0; x < r->plane[n].width; x++) {
		for (y = 0; y < r->plane[n].height; y++) {
			unsigned long src_idx;
			dma_addr_t src;

			if (!sg) {
				pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
				       n, x, y);
				return ERR_PTR(-EINVAL);
			}

			src_idx = rotated_index(r, n, x, y);
			src = i915_gem_object_get_dma_address(obj, src_idx);

			if (sg_dma_len(sg) != PAGE_SIZE) {
				pr_err("Invalid sg.length, found %d, expected %lu for rotated page (%d, %d) [src index %lu]\n",
				       sg_dma_len(sg), PAGE_SIZE,
				       x, y, src_idx);
				return ERR_PTR(-EINVAL);
			}

			if (sg_dma_address(sg) != src) {
				pr_err("Invalid address for rotated page (%d, %d) [src index %lu]\n",
				       x, y, src_idx);
				return ERR_PTR(-EINVAL);
			}

			sg = sg_next(sg);
		}
	}

	return sg;
}

static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
				 const struct intel_rotation_plane_info *b)
{
	return a->width * a->height + b->width * b->height;
}

static int igt_vma_rotate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm = &i915->ggtt.base;
	struct drm_i915_gem_object *obj;
	const struct intel_rotation_plane_info planes[] = {
		{ .width = 1, .height = 1, .stride = 1 },
		{ .width = 2, .height = 2, .stride = 2 },
		{ .width = 4, .height = 4, .stride = 4 },
		{ .width = 8, .height = 8, .stride = 8 },

		{ .width = 3, .height = 5, .stride = 3 },
		{ .width = 3, .height = 5, .stride = 4 },
		{ .width = 3, .height = 5, .stride = 5 },

		{ .width = 5, .height = 3, .stride = 5 },
		{ .width = 5, .height = 3, .stride = 7 },
		{ .width = 5, .height = 3, .stride = 9 },

		{ .width = 4, .height = 6, .stride = 6 },
		{ .width = 6, .height = 4, .stride = 6 },
		{ }
	}, *a, *b;
	const unsigned int max_pages = 64;
	int err = -ENOMEM;

	/* Create VMA for many different combinations of planes and check
	 * that the page layout within the rotated VMA matches our expectations.
	 */

	obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
	if (IS_ERR(obj))
		goto out;

	for (a = planes; a->width; a++) {
		for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
			struct i915_ggtt_view view;
			unsigned int n, max_offset;

			max_offset = max(a->stride * a->height,
					 b->stride * b->height);
			GEM_BUG_ON(max_offset > max_pages);
			max_offset = max_pages - max_offset;

			view.type = I915_GGTT_VIEW_ROTATED;
			view.rotated.plane[0] = *a;
			view.rotated.plane[1] = *b;

			for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
				for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
					struct scatterlist *sg;
					struct i915_vma *vma;

					vma = checked_vma_instance(obj, vm, &view);
					if (IS_ERR(vma)) {
						err = PTR_ERR(vma);
						goto out_object;
					}

					err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
					if (err) {
						pr_err("Failed to pin VMA, err=%d\n", err);
						goto out_object;
					}

					if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
						pr_err("VMA is wrong size, expected %lu, found %llu\n",
						       PAGE_SIZE * rotated_size(a, b), vma->size);
						err = -EINVAL;
						goto out_object;
					}

					if (vma->pages->nents != rotated_size(a, b)) {
						pr_err("sg table is wrong size, expected %u, found %u nents\n",
						       rotated_size(a, b), vma->pages->nents);
						err = -EINVAL;
						goto out_object;
					}

					if (vma->node.size < vma->size) {
						pr_err("VMA binding too small, expected %llu, found %llu\n",
						       vma->size, vma->node.size);
						err = -EINVAL;
						goto out_object;
					}

					if (vma->pages == obj->mm.pages) {
						pr_err("VMA using unrotated object pages!\n");
						err = -EINVAL;
						goto out_object;
					}

					sg = vma->pages->sgl;
					for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
						sg = assert_rotated(obj, &view.rotated, n, sg);
						if (IS_ERR(sg)) {
							pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
							       view.rotated.plane[0].width,
							       view.rotated.plane[0].height,
							       view.rotated.plane[0].stride,
							       view.rotated.plane[0].offset,
							       view.rotated.plane[1].width,
							       view.rotated.plane[1].height,
							       view.rotated.plane[1].stride,
							       view.rotated.plane[1].offset);
							err = -EINVAL;
							goto out_object;
						}
					}

					i915_vma_unpin(vma);
				}
			}
		}
	}

out_object:
	i915_gem_object_put(obj);
out:
	return err;
}

static bool assert_partial(struct drm_i915_gem_object *obj,
			   struct i915_vma *vma,
			   unsigned long offset,
			   unsigned long size)
{
	struct sgt_iter sgt;
	dma_addr_t dma;

	for_each_sgt_dma(dma, sgt, vma->pages) {
		dma_addr_t src;

		if (!size) {
			pr_err("Partial scattergather list too long\n");
			return false;
		}

		src = i915_gem_object_get_dma_address(obj, offset);
		if (src != dma) {
			pr_err("DMA mismatch for partial page offset %lu\n",
			       offset);
			return false;
		}

		offset++;
		size--;
	}

	return true;
}

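/* Check that a pinned vma has the size we asked for and is backed by the
 * pages implied by the view: rotated/partial views must build their own
 * sg table, whereas the normal view must reuse the object's own pages.
 */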
static bool assert_pin(struct i915_vma *vma,
		       struct i915_ggtt_view *view,
		       u64 size,
		       const char *name)
{
	bool ok = true;

	if (vma->size != size) {
		pr_err("(%s) VMA is wrong size, expected %llu, found %llu\n",
		       name, size, vma->size);
		ok = false;
	}

	if (vma->node.size < vma->size) {
		pr_err("(%s) VMA binding too small, expected %llu, found %llu\n",
		       name, vma->size, vma->node.size);
		ok = false;
	}

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
			pr_err("(%s) VMA mismatch upon creation!\n",
			       name);
			ok = false;
		}

		if (vma->pages == vma->obj->mm.pages) {
			pr_err("(%s) VMA using original object pages!\n",
			       name);
			ok = false;
		}
	} else {
		if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
			pr_err("Not the normal ggtt view! Found %d\n",
			       vma->ggtt_view.type);
			ok = false;
		}

		if (vma->pages != vma->obj->mm.pages) {
			pr_err("VMA not using object pages!\n");
			ok = false;
		}
	}

	return ok;
}

static int igt_vma_partial(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm = &i915->ggtt.base;
	const unsigned int npages = 1021; /* prime! */
	struct drm_i915_gem_object *obj;
	const struct phase {
		const char *name;
	} phases[] = {
		{ "create" },
		{ "lookup" },
		{ },
	}, *p;
	unsigned int sz, offset;
	struct i915_vma *vma;
	int err = -ENOMEM;

	/* Create lots of different VMA for the object and check that
	 * we are returned the same VMA when we later request the same range.
	 */

	obj = i915_gem_object_create_internal(i915, npages * PAGE_SIZE);
	if (IS_ERR(obj))
		goto out;

	for (p = phases; p->name; p++) { /* exercise both create/lookup */
		unsigned int count, nvma;

		nvma = 0;
		for_each_prime_number_from(sz, 1, npages) {
			for_each_prime_number_from(offset, 0, npages - sz) {
				struct i915_ggtt_view view;

				view.type = I915_GGTT_VIEW_PARTIAL;
				view.partial.offset = offset;
				view.partial.size = sz;

				if (sz == npages)
					view.type = I915_GGTT_VIEW_NORMAL;

				vma = checked_vma_instance(obj, vm, &view);
				if (IS_ERR(vma)) {
					err = PTR_ERR(vma);
					goto out_object;
				}

				err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
				if (err)
					goto out_object;

				if (!assert_pin(vma, &view, sz * PAGE_SIZE, p->name)) {
					pr_err("(%s) Inconsistent partial pinning for (offset=%d, size=%d)\n",
					       p->name, offset, sz);
					err = -EINVAL;
					goto out_object;
				}

				if (!assert_partial(obj, vma, offset, sz)) {
					pr_err("(%s) Inconsistent partial pages for (offset=%d, size=%d)\n",
					       p->name, offset, sz);
					err = -EINVAL;
					goto out_object;
				}

				i915_vma_unpin(vma);
				nvma++;
			}
		}

		count = 0;
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;
		if (count != nvma) {
			pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n",
			       p->name, count, nvma);
			err = -EINVAL;
			goto out_object;
		}

		/* Check that we did create the whole object mapping */
		vma = checked_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_object;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			goto out_object;

		if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
			pr_err("(%s) inconsistent full pin\n", p->name);
			err = -EINVAL;
			goto out_object;
		}

		i915_vma_unpin(vma);

		count = 0;
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;
		if (count != nvma) {
			pr_err("(%s) allocated an extra full vma!\n", p->name);
			err = -EINVAL;
			goto out_object;
		}
	}

out_object:
	i915_gem_object_put(obj);
out:
	return err;
}

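/* Entry point for the mock selftests: create a mock device (no hardware)
 * and run the vma subtests under struct_mutex.
 */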
int i915_vma_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_vma_create),
		SUBTEST(igt_vma_pin1),
		SUBTEST(igt_vma_rotate),
		SUBTEST(igt_vma_partial),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}