// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/bsearch.h>

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
#include "intel_guc_ads.h"
#include "intel_guc_fwif.h"
#include "intel_uc.h"
#include "i915_drv.h"

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
 * all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |   ...                                 |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[0];
} __packed;
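/*
 * Worked example of the offset math below (sizes are hypothetical, for
 * illustration only): with a 0x1180-byte regset and 0x15000 bytes of
 * golden contexts, the dynamic offsets resolve to:
 *
 *   regset offset      = offsetof(struct __guc_ads_blob, regset)
 *   golden ctxt offset = PAGE_ALIGN(regset offset + 0x1180)
 *   capture offset     = PAGE_ALIGN(golden ctxt offset + 0x15000)
 *   private offset     = PAGE_ALIGN(capture offset + PAGE_SIZE)
 *
 * i.e. each dynamic section starts at the first 4K boundary after the
 * previous one ends, matching the diagram above.
 */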
static u32 guc_ads_regset_size(struct intel_guc *guc)
{
	GEM_BUG_ON(!guc->ads_regset_size);
	return guc->ads_regset_size;
}

static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}

static u32 guc_ads_capture_size(struct intel_guc *guc)
{
	/* FIXME: Allocate a proper capture list */
	return PAGE_ALIGN(PAGE_SIZE);
}

static u32 guc_ads_private_data_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->fw.private_data_size);
}

static u32 guc_ads_regset_offset(struct intel_guc *guc)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_regset_offset(guc) +
		 guc_ads_regset_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_capture_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_golden_ctxt_offset(guc) +
		 guc_ads_golden_ctxt_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_capture_offset(guc) +
		 guc_ads_capture_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_blob_size(struct intel_guc *guc)
{
	return guc_ads_private_data_offset(guc) +
	       guc_ads_private_data_size(guc);
}

static void guc_policies_init(struct intel_guc *guc, struct guc_policies *policies)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	policies->dpc_promote_time = GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
	policies->max_num_work_items = GLOBAL_POLICY_MAX_NUM_WI;

	policies->global_flags = 0;
	if (i915->params.reset < 2)
		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	policies->is_valid = 1;
}

void intel_guc_ads_print_policy_info(struct intel_guc *guc,
				     struct drm_printer *dp)
{
	struct __guc_ads_blob *blob = guc->ads_blob;

	if (unlikely(!blob))
		return;

	drm_printf(dp, "Global scheduling policies:\n");
	drm_printf(dp, "  DPC promote time   = %u\n", blob->policies.dpc_promote_time);
	drm_printf(dp, "  Max num work items = %u\n", blob->policies.max_num_work_items);
	drm_printf(dp, "  Flags              = %u\n", blob->policies.global_flags);
}

static int guc_action_policies_update(struct intel_guc *guc, u32 policy_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}

int intel_guc_global_policies_update(struct intel_guc *guc)
{
	struct __guc_ads_blob *blob = guc->ads_blob;
	struct intel_gt *gt = guc_to_gt(guc);
	intel_wakeref_t wakeref;
	int ret;

	if (!blob)
		return -EOPNOTSUPP;

	GEM_BUG_ON(!blob->ads.scheduler_policies);

	guc_policies_init(guc, &blob->policies);

	if (!intel_guc_is_ready(guc))
		return 0;

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		ret = guc_action_policies_update(guc, blob->ads.scheduler_policies);

	return ret;
}
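/*
 * Note that intel_guc_global_policies_update() is safe to call before the
 * GuC is up: the new values are written into the blob and simply picked up
 * when the GuC parses the ADS at init. Only a live GuC needs the explicit
 * GLOBAL_SCHED_POLICY_CHANGE H2G action above, hence the early return when
 * !intel_guc_is_ready().
 */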
static void guc_mapping_table_init(struct intel_gt *gt,
				   struct guc_gt_system_info *system_info)
{
	unsigned int i, j;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			system_info->mapping_table[i][j] =
				GUC_MAX_INSTANCES_PER_CLASS;

	for_each_engine(engine, gt, id) {
		u8 guc_class = engine_class_to_guc_class(engine->class);

		system_info->mapping_table[guc_class][ilog2(engine->logical_mask)] =
			engine->instance;
	}
}

/*
 * The save/restore register list must be pre-calculated to a temporary
 * buffer before it can be copied inside the ADS.
 */
struct temp_regset {
	/*
	 * ptr to the section of the storage for the engine currently being
	 * worked on
	 */
	struct guc_mmio_reg *registers;
	/* ptr to the base of the allocated storage for all engines */
	struct guc_mmio_reg *storage;
	u32 storage_used;
	u32 storage_max;
};

static int guc_mmio_reg_cmp(const void *a, const void *b)
{
	const struct guc_mmio_reg *ra = a;
	const struct guc_mmio_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct guc_mmio_reg * __must_check
__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
{
	u32 pos = regset->storage_used;
	struct guc_mmio_reg *slot;

	if (pos >= regset->storage_max) {
		size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
		struct guc_mmio_reg *r = krealloc(regset->storage,
						  size, GFP_KERNEL);

		if (!r) {
			WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
				  -ENOMEM);
			return ERR_PTR(-ENOMEM);
		}

		regset->registers = r + (regset->registers - regset->storage);
		regset->storage = r;
		regset->storage_max = size / sizeof(*slot);
	}

	slot = &regset->storage[pos];
	regset->storage_used++;
	*slot = *reg;

	return slot;
}

static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
					  u32 offset, u32 flags)
{
	u32 count = regset->storage_used - (regset->registers - regset->storage);
	struct guc_mmio_reg reg = {
		.offset = offset,
		.flags = flags,
	};
	struct guc_mmio_reg *slot;

	/*
	 * The mmio list is built using separate lists within the driver.
	 * It's possible that at some point we may attempt to add the same
	 * register more than once. Do not consider this an error; silently
	 * move on if the register is already in the list.
	 */
	if (bsearch(&reg, regset->registers, count,
		    sizeof(reg), guc_mmio_reg_cmp))
		return 0;

	slot = __mmio_reg_add(regset, &reg);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	while (slot-- > regset->registers) {
		GEM_BUG_ON(slot[0].offset == slot[1].offset);
		if (slot[1].offset > slot[0].offset)
			break;

		swap(slot[1], slot[0]);
	}

	return 0;
}
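/*
 * guc_mmio_reg_add() above keeps each engine's sub-list sorted by offset:
 * bsearch() rejects duplicates in O(log n), and the swap loop then bubbles
 * a newly appended entry backwards into position (a classic insertion-sort
 * step). For example, adding offset 0x20 to the sorted sub-list
 * [0x10, 0x30, 0x40] appends it at the end and swaps it back past 0x40 and
 * 0x30, yielding [0x10, 0x20, 0x30, 0x40].
 */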
#define GUC_MMIO_REG_ADD(regset, reg, masked) \
	guc_mmio_reg_add(regset, \
			 i915_mmio_reg_offset((reg)), \
			 (masked) ? GUC_REGSET_MASKED : 0)

static int guc_mmio_regset_init(struct temp_regset *regset,
				struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct i915_wa_list *wal = &engine->wa_list;
	struct i915_wa *wa;
	unsigned int i;
	int ret = 0;

	/*
	 * Each engine's registers point to a new start relative to
	 * storage
	 */
	regset->registers = regset->storage + regset->storage_used;

	ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
	ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
	ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);

	/* Be extra paranoid and include all whitelist registers. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
		ret |= GUC_MMIO_REG_ADD(regset,
					RING_FORCE_TO_NONPRIV(base, i),
					false);

	/* add in local MOCS registers */
	for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
		ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);

	return ret ? -1 : 0;
}

static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct temp_regset temp_set = {};
	long total = 0;
	long ret;

	for_each_engine(engine, gt, id) {
		u32 used = temp_set.storage_used;

		ret = guc_mmio_regset_init(&temp_set, engine);
		if (ret < 0)
			goto fail_regset_init;

		guc->ads_regset_count[id] = temp_set.storage_used - used;
		total += guc->ads_regset_count[id];
	}

	guc->ads_regset = temp_set.storage;

	drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
		(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);

	return total * sizeof(struct guc_mmio_reg);

fail_regset_init:
	kfree(temp_set.storage);
	return ret;
}

static void guc_mmio_reg_state_init(struct intel_guc *guc,
				    struct __guc_ads_blob *blob)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	struct guc_mmio_reg *ads_registers;
	enum intel_engine_id id;
	u32 addr_ggtt, offset;

	offset = guc_ads_regset_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
	ads_registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);

	memcpy(ads_registers, guc->ads_regset, guc->ads_regset_size);

	for_each_engine(engine, gt, id) {
		u32 count = guc->ads_regset_count[id];
		struct guc_mmio_reg_set *ads_reg_set;
		u8 guc_class;

		/* Class index is checked in class converter */
		GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);

		guc_class = engine_class_to_guc_class(engine->class);
		ads_reg_set = &blob->ads.reg_state_list[guc_class][engine->instance];

		if (!count) {
			ads_reg_set->address = 0;
			ads_reg_set->count = 0;
			continue;
		}

		ads_reg_set->address = addr_ggtt;
		ads_reg_set->count = count;

		addr_ggtt += count * sizeof(struct guc_mmio_reg);
	}
}

static void fill_engine_enable_masks(struct intel_gt *gt,
				     struct guc_gt_system_info *info)
{
	info->engine_enabled_masks[GUC_RENDER_CLASS] = 1;
	info->engine_enabled_masks[GUC_BLITTER_CLASS] = 1;
	info->engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt);
	info->engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);
}
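/*
 * The regset is thus handled in two passes: guc_mmio_reg_state_create()
 * sizes and builds the per-engine lists in plain kernel memory at driver
 * load, and guc_mmio_reg_state_init() later memcpy()s them into the pinned
 * ADS blob and points each class/instance slot of reg_state_list at its
 * GGTT address. Keeping the temporary copy in guc->ads_regset lets the
 * list be replayed into the blob on GT init/reset without re-walking the
 * workaround lists.
 */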
#define LR_HW_CONTEXT_SIZE	(80 * sizeof(u32))
#define LRC_SKIP_SIZE		(LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
static int guc_prep_golden_context(struct intel_guc *guc,
				   struct __guc_ads_blob *blob)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 addr_ggtt, offset;
	u32 total_size = 0, alloc_size, real_size;
	u8 engine_class, guc_class;
	struct guc_gt_system_info *info, local_info;

	/*
	 * Reserve the memory for the golden contexts and point GuC at it but
	 * leave it empty for now. The context data will be filled in later
	 * once there is something available to put there.
	 *
	 * Note that the HWSP and ring context are not included.
	 *
	 * Note also that the storage must be pinned in the GGTT, so that the
	 * address won't change after GuC has been told where to find it. The
	 * GuC will also validate that the LRC base + size fall within the
	 * allowed GGTT range.
	 */
	if (blob) {
		offset = guc_ads_golden_ctxt_offset(guc);
		addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
		info = &blob->system_info;
	} else {
		memset(&local_info, 0, sizeof(local_info));
		info = &local_info;
		fill_engine_enable_masks(gt, info);
	}

	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		if (engine_class == OTHER_CLASS)
			continue;

		guc_class = engine_class_to_guc_class(engine_class);

		if (!info->engine_enabled_masks[guc_class])
			continue;

		real_size = intel_engine_context_size(gt, engine_class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		if (!blob)
			continue;

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists context. This is required to
		 * allow the GuC to restore just the engine state when a
		 * watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
		blob->ads.eng_state_size[guc_class] = real_size - LRC_SKIP_SIZE;
		blob->ads.golden_context_lrca[guc_class] = addr_ggtt;
		addr_ggtt += alloc_size;
	}

	if (!blob)
		return total_size;

	GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
	return total_size;
}

static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->class != engine_class)
			continue;

		if (!engine->default_state)
			continue;

		return engine;
	}

	return NULL;
}
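/*
 * guc_prep_golden_context() is deliberately dual-purpose: called with
 * blob == NULL from intel_guc_ads_create() it only computes the total size
 * needed, so the blob can be allocated; called again with the real blob
 * from __guc_ads_init() it repeats the same walk to fill in the per-class
 * engine state sizes and GGTT addresses. The two passes must iterate over
 * the same engines in the same order, or the GEM_BUG_ON() size check
 * above will trip.
 */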
static void guc_init_golden_context(struct intel_guc *guc)
{
	struct __guc_ads_blob *blob = guc->ads_blob;
	struct intel_engine_cs *engine;
	struct intel_gt *gt = guc_to_gt(guc);
	u32 addr_ggtt, offset;
	u32 total_size = 0, alloc_size, real_size;
	u8 engine_class, guc_class;
	u8 *ptr;

	if (!intel_uc_uses_guc_submission(&gt->uc))
		return;

	GEM_BUG_ON(!blob);

	/*
	 * Go back and fill in the golden context data now that it is
	 * available.
	 */
	offset = guc_ads_golden_ctxt_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
	ptr = ((u8 *)blob) + offset;

	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		if (engine_class == OTHER_CLASS)
			continue;

		guc_class = engine_class_to_guc_class(engine_class);

		if (!blob->system_info.engine_enabled_masks[guc_class])
			continue;

		real_size = intel_engine_context_size(gt, engine_class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		engine = find_engine_state(gt, engine_class);
		if (!engine) {
			drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n",
				engine_class);
			blob->ads.eng_state_size[guc_class] = 0;
			blob->ads.golden_context_lrca[guc_class] = 0;
			continue;
		}

		GEM_BUG_ON(blob->ads.eng_state_size[guc_class] !=
			   real_size - LRC_SKIP_SIZE);
		GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt);
		addr_ggtt += alloc_size;

		shmem_read(engine->default_state, 0, ptr, real_size);
		ptr += alloc_size;
	}

	GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}

static void guc_capture_list_init(struct intel_guc *guc, struct __guc_ads_blob *blob)
{
	int i, j;
	u32 addr_ggtt, offset;

	offset = guc_ads_capture_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;

	/* FIXME: Populate a proper capture list */

	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
			blob->ads.capture_instance[i][j] = addr_ggtt;
			blob->ads.capture_class[i][j] = addr_ggtt;
		}

		blob->ads.capture_global[i] = addr_ggtt;
	}
}

static void __guc_ads_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	struct __guc_ads_blob *blob = guc->ads_blob;
	u32 base;

	/* GuC scheduling policies */
	guc_policies_init(guc, &blob->policies);

	/* System info */
	fill_engine_enable_masks(gt, &blob->system_info);

	blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] =
		hweight8(gt->info.sseu.slice_mask);
	blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] =
		gt->info.vdbox_sfc_access;

	if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
		u32 distdbreg = intel_uncore_read(gt->uncore,
						  GEN12_DIST_DBS_POPULATED);

		blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] =
			((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT) &
			 GEN12_DOORBELLS_PER_SQIDI) + 1;
	}

	/* Golden contexts for re-initialising after a watchdog reset */
	guc_prep_golden_context(guc, blob);

	guc_mapping_table_init(guc_to_gt(guc), &blob->system_info);

	base = intel_guc_ggtt_offset(guc, guc->ads_vma);

	/* Capture list for hang debug */
	guc_capture_list_init(guc, blob);

	/* ADS */
	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
	blob->ads.gt_system_info = base + ptr_offset(blob, system_info);

	/* MMIO save/restore list */
	guc_mmio_reg_state_init(guc, blob);

	/* Private Data */
	blob->ads.private_data = base + guc_ads_private_data_offset(guc);

	i915_gem_object_flush_map(guc->ads_vma->obj);
}
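/*
 * Creation order matters below: the dynamic section sizes
 * (guc->ads_regset_size and guc->ads_golden_ctxt_size) must be known
 * before guc_ads_blob_size() can be evaluated, so the sizing passes run
 * first, and only then is the vma allocated and __guc_ads_init() called
 * to populate it.
 */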
/**
 * intel_guc_ads_create() - allocates and initializes GuC ADS.
 * @guc: intel_guc struct
 *
 * The GuC needs a memory block (the Additional Data Struct) where it will
 * store some data. Allocate and initialize such a memory block for GuC use.
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	u32 size;
	int ret;

	GEM_BUG_ON(guc->ads_vma);

	/*
	 * Create reg state size dynamically on system memory to be copied to
	 * the final ads blob on gt init/reset
	 */
	ret = guc_mmio_reg_state_create(guc);
	if (ret < 0)
		return ret;
	guc->ads_regset_size = ret;

	/* Likewise the golden contexts: */
	ret = guc_prep_golden_context(guc, NULL);
	if (ret < 0)
		return ret;
	guc->ads_golden_ctxt_size = ret;

	/* Now the total size can be determined: */
	size = guc_ads_blob_size(guc);

	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
					     (void **)&guc->ads_blob);
	if (ret)
		return ret;

	__guc_ads_init(guc);

	return 0;
}

void intel_guc_ads_init_late(struct intel_guc *guc)
{
	/*
	 * The golden context setup requires the saved engine state from
	 * __engines_record_defaults(). However, that requires engines to be
	 * operational which means the ADS must already have been configured.
	 * Fortunately, the golden context state is not needed until a hang
	 * occurs, so it can be filled in during this late init phase.
	 */
	guc_init_golden_context(guc);
}

void intel_guc_ads_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
	guc->ads_blob = NULL;
	kfree(guc->ads_regset);
}

static void guc_ads_private_data_reset(struct intel_guc *guc)
{
	u32 size;

	size = guc_ads_private_data_size(guc);
	if (!size)
		return;

	memset((void *)guc->ads_blob + guc_ads_private_data_offset(guc), 0,
	       size);
}

/**
 * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
 * @guc: intel_guc struct
 *
 * GuC stores some data in ADS, which might be stale after a reset.
 * Reinitialize the whole ADS in case any part of it was corrupted during
 * the previous GuC run.
 */
void intel_guc_ads_reset(struct intel_guc *guc)
{
	if (!guc->ads_vma)
		return;

	__guc_ads_init(guc);

	guc_ads_private_data_reset(guc);
}

u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
{
	struct __guc_ads_blob *blob = guc->ads_blob;
	u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
	u32 offset = base + ptr_offset(blob, engine_usage);

	return offset;
}

struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	struct __guc_ads_blob *blob = guc->ads_blob;
	u8 guc_class = engine_class_to_guc_class(engine->class);

	return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
}