// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "intel_uc.h"

#include "i915_drv.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

static void uc_expand_default_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	if (i915->params.enable_guc != -1)
		return;

	/* Don't enable GuC/HuC on pre-Gen12 */
	if (GRAPHICS_VER(i915) < 12) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Don't enable GuC/HuC on older Gen12 platforms */
	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Intermediate platforms are HuC authentication only */
	if (IS_ALDERLAKE_S(i915)) {
		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
		return;
	}

	/* Default: enable HuC authentication and GuC submission */
	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
}

/*
 * Reset GuC providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	drm_dbg(&i915->drm,
		"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
		i915->params.enable_guc,
		yesno(intel_uc_wants_guc(uc)),
		yesno(intel_uc_wants_guc_submission(uc)),
		yesno(intel_uc_wants_huc(uc)),
		yesno(intel_uc_wants_guc_slpc(uc)));

	if (i915->params.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_wants_guc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
		GEM_BUG_ON(intel_uc_wants_huc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "GuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "HuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "GuC submission is N/A");

	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "undocumented flag");
}

void intel_uc_init_early(struct intel_uc *uc)
{
	uc_expand_default_options(uc);

	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	__confirm_options(uc);

	if (intel_uc_wants_guc(uc))
		uc->ops = &uc_ops_on;
	else
		uc->ops = &uc_ops_off;
}

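/*
 * Late initialisation is currently GuC-only; the HuC has no equivalent
 * step, so this simply forwards to intel_guc_init_late().
 */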
void intel_uc_init_late(struct intel_uc *uc)
{
	intel_guc_init_late(&uc->guc);
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with GuC is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}

static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(&gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(&gt->irq_lock);

	drm_dbg(&i915->drm, "GuC communication enabled\n");

	return 0;
}

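/*
 * Teardown counterpart of guc_enable_communication(); callers pair the two
 * around any window in which CTB traffic is expected (see __uc_init_hw()
 * and intel_uc_runtime_suspend()).
 */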
static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/*
	 * Events generated during or after CT disable are logged by GuC via
	 * mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	drm_dbg(&i915->drm, "GuC communication disabled\n");
}

static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	int err;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err) {
		/* Make sure we transition out of transient "SELECTED" state */
		if (intel_uc_wants_huc(uc)) {
			drm_dbg(&uc_to_gt(uc)->i915->drm,
				"Failed to fetch GuC: %d disabling HuC\n", err);
			intel_uc_fw_change_status(&uc->huc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		return;
	}

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);
}

static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

static int __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_uses_guc(uc))
		return 0;

	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
		return -ENOMEM;

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_uses_huc(uc)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto out_guc;
	}

	return 0;

out_guc:
	intel_guc_fini(guc);
	return ret;
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);
}

static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

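/*
 * WOPCM (Write Once Protected Content Memory) is the carved-out region that
 * holds the GuC and HuC firmware images once loaded; the partitioning
 * computed earlier during probe is programmed (and locked) into the
 * hardware below.
 */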
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before we look at WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

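/*
 * Bring up the microcontrollers: program the WOPCM registers, upload the
 * firmware images (retrying on Gen9, see the workarounds below), enable CT
 * communication, authenticate the HuC and finally enable GuC submission
 * and SLPC when requested.
 */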
static int __uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	intel_huc_auth(huc);

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_enable(guc);

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
		 "submission",
		 enableddisabled(intel_uc_uses_guc_submission(uc)));

	drm_info(&i915->drm, "GuC SLPC: %s\n",
		 enableddisabled(intel_uc_uses_guc_slpc(uc)));

	if (intel_uc_uses_huc(uc)) {
		drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
			 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
			 huc->fw.path,
			 huc->fw.major_ver_found, huc->fw.minor_ver_found,
			 "authenticated",
			 yesno(intel_huc_is_authenticated(huc)));
	}

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	__uc_sanitize(uc);

	if (!ret) {
		drm_notice(&i915->drm, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}

static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_fw_running(guc))
		return;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_disable(guc);

	__uc_sanitize(uc);
}

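/*
 * GT reset support is split across intel_uc_reset_prepare(),
 * intel_uc_reset() and intel_uc_reset_finish() below, matching the
 * corresponding stages of the full GPU reset sequence that calls into them.
 */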
564 */ 565 void intel_uc_reset_prepare(struct intel_uc *uc) 566 { 567 struct intel_guc *guc = &uc->guc; 568 569 uc->reset_in_progress = true; 570 571 /* Nothing to do if GuC isn't supported */ 572 if (!intel_uc_supports_guc(uc)) 573 return; 574 575 /* Firmware expected to be running when this function is called */ 576 if (!intel_guc_is_ready(guc)) 577 goto sanitize; 578 579 if (intel_uc_uses_guc_submission(uc)) 580 intel_guc_submission_reset_prepare(guc); 581 582 sanitize: 583 __uc_sanitize(uc); 584 } 585 586 void intel_uc_reset(struct intel_uc *uc, bool stalled) 587 { 588 struct intel_guc *guc = &uc->guc; 589 590 /* Firmware can not be running when this function is called */ 591 if (intel_uc_uses_guc_submission(uc)) 592 intel_guc_submission_reset(guc, stalled); 593 } 594 595 void intel_uc_reset_finish(struct intel_uc *uc) 596 { 597 struct intel_guc *guc = &uc->guc; 598 599 uc->reset_in_progress = false; 600 601 /* Firmware expected to be running when this function is called */ 602 if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc)) 603 intel_guc_submission_reset_finish(guc); 604 } 605 606 void intel_uc_cancel_requests(struct intel_uc *uc) 607 { 608 struct intel_guc *guc = &uc->guc; 609 610 /* Firmware can not be running when this function is called */ 611 if (intel_uc_uses_guc_submission(uc)) 612 intel_guc_submission_cancel_requests(guc); 613 } 614 615 void intel_uc_runtime_suspend(struct intel_uc *uc) 616 { 617 struct intel_guc *guc = &uc->guc; 618 619 if (!intel_guc_is_ready(guc)) 620 return; 621 622 /* 623 * Wait for any outstanding CTB before tearing down communication /w the 624 * GuC. 625 */ 626 #define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5) 627 intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h, 628 false, OUTSTANDING_CTB_TIMEOUT_PERIOD); 629 GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h)); 630 631 guc_disable_communication(guc); 632 } 633 634 void intel_uc_suspend(struct intel_uc *uc) 635 { 636 struct intel_guc *guc = &uc->guc; 637 intel_wakeref_t wakeref; 638 int err; 639 640 if (!intel_guc_is_ready(guc)) 641 return; 642 643 with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { 644 err = intel_guc_suspend(guc); 645 if (err) 646 DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); 647 } 648 } 649 650 static int __uc_resume(struct intel_uc *uc, bool enable_communication) 651 { 652 struct intel_guc *guc = &uc->guc; 653 struct intel_gt *gt = guc_to_gt(guc); 654 int err; 655 656 if (!intel_guc_is_fw_running(guc)) 657 return 0; 658 659 /* Make sure we enable communication if and only if it's disabled */ 660 GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); 661 662 if (enable_communication) 663 guc_enable_communication(guc); 664 665 /* If we are only resuming GuC communication but not reloading 666 * GuC, we need to ensure the ARAT timer interrupt is enabled 667 * again. In case of GuC reload, it is enabled during SLPC enable. 668 */ 669 if (enable_communication && intel_uc_uses_guc_slpc(uc)) 670 intel_guc_pm_intrmsk_enable(gt); 671 672 err = intel_guc_resume(guc); 673 if (err) { 674 DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); 675 return err; 676 } 677 678 return 0; 679 } 680 681 int intel_uc_resume(struct intel_uc *uc) 682 { 683 /* 684 * When coming out of S3/S4 we sanitize and re-init the HW, so 685 * communication is already re-enabled at this point. 
686 */ 687 return __uc_resume(uc, false); 688 } 689 690 int intel_uc_runtime_resume(struct intel_uc *uc) 691 { 692 /* 693 * During runtime resume we don't sanitize, so we need to re-init 694 * communication as well. 695 */ 696 return __uc_resume(uc, true); 697 } 698 699 static const struct intel_uc_ops uc_ops_off = { 700 .init_hw = __uc_check_hw, 701 }; 702 703 static const struct intel_uc_ops uc_ops_on = { 704 .sanitize = __uc_sanitize, 705 706 .init_fw = __uc_fetch_firmwares, 707 .fini_fw = __uc_cleanup_firmwares, 708 709 .init = __uc_init, 710 .fini = __uc_fini, 711 712 .init_hw = __uc_init_hw, 713 .fini_hw = __uc_fini_hw, 714 }; 715