/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
						guc_send_reg(guc, i),
						FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

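/*
 * Allocate a single page of GuC-accessible memory, pinned in the GGTT above
 * the WOPCM region and kept permanently CPU-mapped (write-back) for the
 * driver. Its GGTT offset is handed to the firmware by actions that need a
 * shared scratch area, e.g. engine reset requests (intel_guc_reset_engine()).
 */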
static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

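/*
 * Encode the GGTT offset of the log buffer and the sizes of its crash dump,
 * DPC and ISR sections into the log-parameters control dword. Sizes are
 * expressed in 1 MiB units when every section is megabyte aligned, otherwise
 * in 4 KiB units; UNIT/FLAG below select the encoding at compile time.
 */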
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block to the SOFT_SCRATCH registers before starting
 * the firmware transfer. The firmware reads these parameters on startup and
 * they cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fw;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_uc_is_using_guc_submission(&gt->uc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_uc_is_using_guc_submission(&gt->uc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
						guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC Address Space [0, ggtt_pin_bias) is mapped to the
 * GuC WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped to
 * DRAM. The value of ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return: An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}